├── ARM64Assembler.h ├── ARMAssembler.cpp ├── ARMAssembler.h ├── ARMv7Assembler.h ├── AbortReason.h ├── AbstractMacroAssembler.h ├── AllowMacroScratchRegisterUsage.h ├── AssemblerBuffer.h ├── AssemblerBufferWithConstantPool.h ├── AssemblerCommon.h ├── COPYING.LIB ├── CodeLocation.h ├── DisallowMacroScratchRegisterUsage.h ├── ExecutableAllocator.cpp ├── ExecutableAllocator.h ├── JITCompilationEffort.h ├── LinkBuffer.cpp ├── LinkBuffer.h ├── MIPSAssembler.h ├── MacroAssembler.cpp ├── MacroAssembler.h ├── MacroAssemblerARM.cpp ├── MacroAssemblerARM.h ├── MacroAssemblerARM64.cpp ├── MacroAssemblerARM64.h ├── MacroAssemblerARMv7.cpp ├── MacroAssemblerARMv7.h ├── MacroAssemblerCodeRef.cpp ├── MacroAssemblerCodeRef.h ├── MacroAssemblerMIPS.h ├── MacroAssemblerPrinter.cpp ├── MacroAssemblerPrinter.h ├── MacroAssemblerSH4.h ├── MacroAssemblerX86.h ├── MacroAssemblerX86Common.cpp ├── MacroAssemblerX86Common.h ├── MacroAssemblerX86_64.h ├── MaxFrameExtentForSlowPathCall.h ├── README.md ├── SH4Assembler.h └── X86Assembler.h /ARMAssembler.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2009 University of Szeged 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY 15 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 17 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR 18 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 19 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 20 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 22 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 | */ 26 | 27 | #include "config.h" 28 | 29 | #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 30 | 31 | #include "ARMAssembler.h" 32 | 33 | namespace JSC { 34 | 35 | // Patching helpers 36 | 37 | void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr) 38 | { 39 | ARMWord *ldr = reinterpret_cast(loadAddr); 40 | ARMWord diff = reinterpret_cast(constPoolAddr) - ldr; 41 | ARMWord index = (*ldr & 0xfff) >> 1; 42 | 43 | ASSERT(diff >= 1); 44 | if (diff >= 2 || index > 0) { 45 | diff = (diff + index - 2) * sizeof(ARMWord); 46 | ASSERT(diff <= 0xfff); 47 | *ldr = (*ldr & ~0xfff) | diff; 48 | } else 49 | *ldr = (*ldr & ~(0xfff | ARMAssembler::DataTransferUp)) | sizeof(ARMWord); 50 | } 51 | 52 | // Handle immediates 53 | 54 | ARMWord ARMAssembler::getOp2(ARMWord imm) 55 | { 56 | int rol; 57 | 58 | if (imm <= 0xff) 59 | return Op2Immediate | imm; 60 | 61 | if ((imm & 0xff000000) == 0) { 62 | imm <<= 8; 63 | rol = 8; 64 | } 65 | else { 66 | imm = (imm << 24) | (imm >> 8); 67 | rol = 0; 68 | } 69 | 70 | if ((imm & 0xff000000) == 0) { 71 | imm <<= 8; 72 | rol += 4; 73 | } 74 | 75 | if ((imm & 0xf0000000) == 0) { 76 | imm <<= 4; 77 | rol += 2; 78 | } 79 | 80 | if ((imm & 0xc0000000) == 0) { 81 | imm <<= 2; 82 | rol += 1; 83 | } 84 | 85 | if ((imm & 0x00ffffff) == 0) 86 | return Op2Immediate | (imm >> 24) | (rol << 8); 87 | 88 | return InvalidImmediate; 89 | } 90 | 91 | int ARMAssembler::genInt(int reg, ARMWord imm, bool positive) 92 | { 93 | // Step1: Search a non-immediate part 94 | ARMWord mask; 95 | ARMWord imm1; 96 | ARMWord imm2; 97 | int rol; 98 | 99 | mask = 0xff000000; 100 | rol = 8; 101 | while(1) { 102 | if ((imm & mask) == 0) { 103 | imm = (imm << rol) | (imm >> (32 - rol)); 104 | rol = 4 + (rol >> 1); 105 | break; 106 | } 107 | rol += 2; 108 | mask >>= 2; 109 | if (mask & 0x3) { 110 | // rol 8 111 | imm = (imm << 8) | (imm >> 24); 112 | mask = 0xff00; 113 | rol = 24; 114 | while (1) { 115 | if ((imm & mask) == 0) { 116 | imm = (imm << rol) | (imm >> (32 - rol)); 117 | rol = (rol >> 1) - 8; 118 | break; 119 | } 120 | rol += 2; 121 | mask >>= 2; 122 | if (mask & 0x3) 123 | return 0; 124 | } 125 | break; 126 | } 127 | } 128 | 129 | ASSERT((imm & 0xff) == 0); 130 | 131 | if ((imm & 0xff000000) == 0) { 132 | imm1 = Op2Immediate | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8); 133 | imm2 = Op2Immediate | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8); 134 | } else if (imm & 0xc0000000) { 135 | imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8); 136 | imm <<= 8; 137 | rol += 4; 138 | 139 | if ((imm & 0xff000000) == 0) { 140 | imm <<= 8; 141 | rol += 4; 142 | } 143 | 144 | if ((imm & 0xf0000000) == 0) { 145 | imm <<= 4; 146 | rol += 2; 147 | } 148 | 149 | if ((imm & 0xc0000000) == 0) { 150 | imm <<= 2; 151 | rol += 1; 152 | } 153 | 154 | if ((imm & 0x00ffffff) == 0) 155 | imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8); 156 | else 157 | return 0; 158 | } else { 159 | if ((imm & 0xf0000000) == 0) { 160 | imm <<= 4; 161 | rol += 2; 162 | } 163 | 164 | if ((imm & 0xc0000000) == 0) { 165 | imm <<= 2; 166 | rol += 1; 167 | } 168 | 169 | imm1 = Op2Immediate | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8); 170 | imm <<= 8; 171 | rol += 4; 172 | 173 | if ((imm & 0xf0000000) == 0) { 174 | imm <<= 4; 175 | rol += 2; 176 | } 177 | 178 | if ((imm & 0xc0000000) == 0) { 179 | imm <<= 2; 180 | rol += 1; 181 | } 182 | 183 | if ((imm & 0x00ffffff) == 0) 184 | imm2 = Op2Immediate | (imm >> 24) | ((rol & 0xf) << 8); 185 | else 186 | return 0; 187 | } 188 | 189 | if (positive) { 
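// (Added note: imm1 and imm2 now each hold an op2-encoded chunk, i.e. an 8-bit value plus a rotation.
// A positive target value is rebuilt with MOV followed by ORR; the negated form - used when the caller
// invoked genInt with ~imm - is rebuilt with MVN followed by BIC.)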
190 | mov(reg, imm1); 191 | orr(reg, reg, imm2); 192 | } else { 193 | mvn(reg, imm1); 194 | bic(reg, reg, imm2); 195 | } 196 | 197 | return 1; 198 | } 199 | 200 | ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert) 201 | { 202 | ARMWord tmp; 203 | 204 | // Do it by 1 instruction 205 | tmp = getOp2(imm); 206 | if (tmp != InvalidImmediate) 207 | return tmp; 208 | 209 | tmp = getOp2(~imm); 210 | if (tmp != InvalidImmediate) { 211 | if (invert) 212 | return tmp | Op2InvertedImmediate; 213 | mvn(tmpReg, tmp); 214 | return tmpReg; 215 | } 216 | 217 | return encodeComplexImm(imm, tmpReg); 218 | } 219 | 220 | void ARMAssembler::moveImm(ARMWord imm, int dest) 221 | { 222 | ARMWord tmp; 223 | 224 | // Do it by 1 instruction 225 | tmp = getOp2(imm); 226 | if (tmp != InvalidImmediate) { 227 | mov(dest, tmp); 228 | return; 229 | } 230 | 231 | tmp = getOp2(~imm); 232 | if (tmp != InvalidImmediate) { 233 | mvn(dest, tmp); 234 | return; 235 | } 236 | 237 | encodeComplexImm(imm, dest); 238 | } 239 | 240 | ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest) 241 | { 242 | #if WTF_ARM_ARCH_AT_LEAST(7) 243 | ARMWord tmp = getImm16Op2(imm); 244 | if (tmp != InvalidImmediate) { 245 | movw(dest, tmp); 246 | return dest; 247 | } 248 | movw(dest, getImm16Op2(imm & 0xffff)); 249 | movt(dest, getImm16Op2(imm >> 16)); 250 | return dest; 251 | #else 252 | // Do it by 2 instruction 253 | if (genInt(dest, imm, true)) 254 | return dest; 255 | if (genInt(dest, ~imm, false)) 256 | return dest; 257 | 258 | ldrImmediate(dest, imm); 259 | return dest; 260 | #endif 261 | } 262 | 263 | // Memory load/store helpers 264 | 265 | void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset) 266 | { 267 | if (offset >= 0) { 268 | if (offset <= 0xfff) 269 | dtrUp(transferType, srcDst, base, offset); 270 | else if (offset <= 0xfffff) { 271 | add(ARMRegisters::S0, base, Op2Immediate | (offset >> 12) | (10 << 8)); 272 | dtrUp(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff)); 273 | } else { 274 | moveImm(offset, ARMRegisters::S0); 275 | dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0); 276 | } 277 | } else { 278 | if (offset >= -0xfff) 279 | dtrDown(transferType, srcDst, base, -offset); 280 | else if (offset >= -0xfffff) { 281 | sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 12) | (10 << 8)); 282 | dtrDown(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff)); 283 | } else { 284 | moveImm(offset, ARMRegisters::S0); 285 | dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0); 286 | } 287 | } 288 | } 289 | 290 | void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset) 291 | { 292 | ASSERT(scale >= 0 && scale <= 3); 293 | ARMWord op2 = lsl(index, scale); 294 | 295 | if (!offset) { 296 | dtrUpRegister(transferType, srcDst, base, op2); 297 | return; 298 | } 299 | 300 | if (offset <= 0xfffff && offset >= -0xfffff) { 301 | add(ARMRegisters::S0, base, op2); 302 | dataTransfer32(transferType, srcDst, ARMRegisters::S0, offset); 303 | return; 304 | } 305 | 306 | moveImm(offset, ARMRegisters::S0); 307 | add(ARMRegisters::S0, ARMRegisters::S0, op2); 308 | dtrUpRegister(transferType, srcDst, base, ARMRegisters::S0); 309 | } 310 | 311 | void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset) 312 | { 313 | if (offset >= 0) { 314 | if (offset <= 0xff) 315 | halfDtrUp(transferType, 
srcDst, base, getOp2Half(offset)); 316 | else if (offset <= 0xffff) { 317 | add(ARMRegisters::S0, base, Op2Immediate | (offset >> 8) | (12 << 8)); 318 | halfDtrUp(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff)); 319 | } else { 320 | moveImm(offset, ARMRegisters::S0); 321 | halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0); 322 | } 323 | } else { 324 | if (offset >= -0xff) 325 | halfDtrDown(transferType, srcDst, base, getOp2Half(-offset)); 326 | else if (offset >= -0xffff) { 327 | sub(ARMRegisters::S0, base, Op2Immediate | (-offset >> 8) | (12 << 8)); 328 | halfDtrDown(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff)); 329 | } else { 330 | moveImm(offset, ARMRegisters::S0); 331 | halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0); 332 | } 333 | } 334 | } 335 | 336 | void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset) 337 | { 338 | if (!scale && !offset) { 339 | halfDtrUpRegister(transferType, srcDst, base, index); 340 | return; 341 | } 342 | 343 | ARMWord op2 = lsl(index, scale); 344 | 345 | if (offset <= 0xffff && offset >= -0xffff) { 346 | add(ARMRegisters::S0, base, op2); 347 | dataTransfer16(transferType, srcDst, ARMRegisters::S0, offset); 348 | return; 349 | } 350 | 351 | moveImm(offset, ARMRegisters::S0); 352 | add(ARMRegisters::S0, ARMRegisters::S0, op2); 353 | halfDtrUpRegister(transferType, srcDst, base, ARMRegisters::S0); 354 | } 355 | 356 | void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset) 357 | { 358 | // VFP cannot directly access memory that is not four-byte-aligned 359 | if (!(offset & 0x3)) { 360 | if (offset <= 0x3ff && offset >= 0) { 361 | doubleDtrUp(transferType, srcDst, base, offset >> 2); 362 | return; 363 | } 364 | if (offset <= 0x3ffff && offset >= 0) { 365 | add(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8)); 366 | doubleDtrUp(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff); 367 | return; 368 | } 369 | offset = -offset; 370 | 371 | if (offset <= 0x3ff && offset >= 0) { 372 | doubleDtrDown(transferType, srcDst, base, offset >> 2); 373 | return; 374 | } 375 | if (offset <= 0x3ffff && offset >= 0) { 376 | sub(ARMRegisters::S0, base, Op2Immediate | (offset >> 10) | (11 << 8)); 377 | doubleDtrDown(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff); 378 | return; 379 | } 380 | offset = -offset; 381 | } 382 | 383 | moveImm(offset, ARMRegisters::S0); 384 | add(ARMRegisters::S0, ARMRegisters::S0, base); 385 | doubleDtrUp(transferType, srcDst, ARMRegisters::S0, 0); 386 | } 387 | 388 | void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset) 389 | { 390 | add(ARMRegisters::S1, base, lsl(index, scale)); 391 | dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset); 392 | } 393 | 394 | void ARMAssembler::prepareExecutableCopy(void* to) 395 | { 396 | // 64-bit alignment is required for next constant pool and JIT code as well 397 | m_buffer.flushWithoutBarrier(true); 398 | 399 | char* data = reinterpret_cast(m_buffer.data()); 400 | ptrdiff_t delta = reinterpret_cast(to) - data; 401 | 402 | for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) { 403 | // The last bit is set if the constant must be placed on constant pool. 
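// (Added note: each jump record keeps its buffer offset in m_offset with the low bit doubling as that
// flag; masking with ~0x1 below recovers the real offset of the load instruction. When the flag is
// clear, the LDR is relaxed into a direct conditional branch, provided the displacement fits within
// the B instruction's offset field.)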
404 | int pos = (iter->m_offset) & (~0x1); 405 | ARMWord* ldrAddr = reinterpret_cast_ptr(data + pos); 406 | ARMWord* addr = getLdrImmAddress(ldrAddr); 407 | if (*addr != InvalidBranchTarget) { 408 | if (!(iter->m_offset & 1)) { 409 | intptr_t difference = reinterpret_cast_ptr(data + *addr) - (ldrAddr + DefaultPrefetchOffset); 410 | 411 | if ((difference <= MaximumBranchOffsetDistance && difference >= MinimumBranchOffsetDistance)) { 412 | *ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BranchOffsetMask); 413 | continue; 414 | } 415 | } 416 | *addr = reinterpret_cast(data + delta + *addr); 417 | } 418 | } 419 | } 420 | 421 | } // namespace JSC 422 | 423 | #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 424 | -------------------------------------------------------------------------------- /AbortReason.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2014-2016 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #ifndef AbortReason_h 27 | #define AbortReason_h 28 | 29 | namespace JSC { 30 | 31 | // It's important to not change the values of existing abort reasons unless we really 32 | // have to. For this reason there is a BASIC-style numbering that should allow us to 33 | // sneak new reasons in without changing the numbering of existing reasons - at least 34 | // for a while. 
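// For example, a hypothetical new reason needed between AHIsNotCell (40) and AHIsNotInt32 (50)
// could simply take an unused value such as 45, leaving every existing entry untouched.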
35 | enum AbortReason { 36 | AHCallFrameMisaligned = 10, 37 | AHIndexingTypeIsValid = 20, 38 | AHInsaneArgumentCount = 30, 39 | AHIsNotCell = 40, 40 | AHIsNotInt32 = 50, 41 | AHIsNotJSDouble = 60, 42 | AHIsNotJSInt32 = 70, 43 | AHIsNotJSNumber = 80, 44 | AHIsNotNull = 90, 45 | AHStackPointerMisaligned = 100, 46 | AHStructureIDIsValid = 110, 47 | AHTagMaskNotInPlace = 120, 48 | AHTagTypeNumberNotInPlace = 130, 49 | AHTypeInfoInlineTypeFlagsAreValid = 140, 50 | AHTypeInfoIsValid = 150, 51 | B3Oops = 155, 52 | DFGBailedAtTopOfBlock = 161, 53 | DFGBailedAtEndOfNode = 162, 54 | DFGBasicStorageAllocatorZeroSize = 170, 55 | DFGIsNotCell = 180, 56 | DFGIneffectiveWatchpoint = 190, 57 | DFGNegativeStringLength = 200, 58 | DFGSlowPathGeneratorFellThrough = 210, 59 | DFGUnreachableBasicBlock = 220, 60 | DFGUnreachableNode = 225, 61 | DFGUnreasonableOSREntryJumpDestination = 230, 62 | DFGVarargsThrowingPathDidNotThrow = 235, 63 | FTLCrash = 236, 64 | JITDidReturnFromTailCall = 237, 65 | JITDivOperandsAreNotNumbers = 240, 66 | JITGetByValResultIsNotEmpty = 250, 67 | JITNotSupported = 260, 68 | JITOffsetIsNotOutOfLine = 270, 69 | JITUncoughtExceptionAfterCall = 275, 70 | JITUnexpectedCallFrameSize = 277, 71 | JITUnreasonableLoopHintJumpTarget = 280, 72 | RPWUnreasonableJumpTarget = 290, 73 | RepatchIneffectiveWatchpoint = 300, 74 | RepatchInsaneArgumentCount = 310, 75 | TGInvalidPointer = 320, 76 | TGNotSupported = 330, 77 | YARRNoInputConsumed = 340, 78 | }; 79 | 80 | } // namespace JSC 81 | 82 | #endif // AbortReason_h 83 | 84 | -------------------------------------------------------------------------------- /AllowMacroScratchRegisterUsage.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | */ 25 | 26 | #ifndef AllowMacroScratchRegisterUsage_h 27 | #define AllowMacroScratchRegisterUsage_h 28 | 29 | #if ENABLE(ASSEMBLER) 30 | 31 | #include "MacroAssembler.h" 32 | 33 | namespace JSC { 34 | 35 | class AllowMacroScratchRegisterUsage { 36 | public: 37 | AllowMacroScratchRegisterUsage(MacroAssembler& masm) 38 | : m_masm(masm) 39 | , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister) 40 | { 41 | masm.m_allowScratchRegister = true; 42 | } 43 | 44 | ~AllowMacroScratchRegisterUsage() 45 | { 46 | m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister; 47 | } 48 | 49 | private: 50 | MacroAssembler& m_masm; 51 | bool m_oldValueOfAllowScratchRegister; 52 | }; 53 | 54 | } // namespace JSC 55 | 56 | #endif // ENABLE(ASSEMBLER) 57 | 58 | #endif // AllowMacroScratchRegisterUsage_h 59 | 60 | -------------------------------------------------------------------------------- /AssemblerBuffer.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
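// A usage sketch for the AllowMacroScratchRegisterUsage guard defined above (the helper name and its
// arguments are hypothetical; the MacroAssembler instance comes from surrounding JIT code, not from
// this file):
static void materializePointer(JSC::MacroAssembler& jit, void* pointer, JSC::MacroAssembler::RegisterID dest)
{
    // Within this scope, macro operations may clobber the scratch register, which some
    // backends need in order to synthesize a full-width pointer constant.
    JSC::AllowMacroScratchRegisterUsage allowScratch(jit);
    jit.move(JSC::MacroAssembler::TrustedImmPtr(pointer), dest);
}   // The destructor restores the previous value of m_allowScratchRegister.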
24 | */ 25 | 26 | #ifndef AssemblerBuffer_h 27 | #define AssemblerBuffer_h 28 | 29 | #if ENABLE(ASSEMBLER) 30 | 31 | #include "ExecutableAllocator.h" 32 | #include "JITCompilationEffort.h" 33 | #include "stdint.h" 34 | #include 35 | #include 36 | #include 37 | #include 38 | 39 | namespace JSC { 40 | 41 | struct AssemblerLabel { 42 | AssemblerLabel() 43 | : m_offset(std::numeric_limits::max()) 44 | { 45 | } 46 | 47 | explicit AssemblerLabel(uint32_t offset) 48 | : m_offset(offset) 49 | { 50 | } 51 | 52 | bool isSet() const { return (m_offset != std::numeric_limits::max()); } 53 | 54 | AssemblerLabel labelAtOffset(int offset) const 55 | { 56 | return AssemblerLabel(m_offset + offset); 57 | } 58 | 59 | bool operator==(const AssemblerLabel& other) const { return m_offset == other.m_offset; } 60 | 61 | uint32_t m_offset; 62 | }; 63 | 64 | class AssemblerData { 65 | WTF_MAKE_NONCOPYABLE(AssemblerData); 66 | static const size_t InlineCapacity = 128; 67 | public: 68 | AssemblerData() 69 | : m_buffer(m_inlineBuffer) 70 | , m_capacity(InlineCapacity) 71 | { 72 | } 73 | 74 | AssemblerData(size_t initialCapacity) 75 | { 76 | if (initialCapacity <= InlineCapacity) { 77 | m_capacity = InlineCapacity; 78 | m_buffer = m_inlineBuffer; 79 | } else { 80 | m_capacity = initialCapacity; 81 | m_buffer = static_cast(fastMalloc(m_capacity)); 82 | } 83 | } 84 | 85 | AssemblerData(AssemblerData&& other) 86 | { 87 | if (other.isInlineBuffer()) { 88 | ASSERT(other.m_capacity == InlineCapacity); 89 | memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity); 90 | m_buffer = m_inlineBuffer; 91 | } else 92 | m_buffer = other.m_buffer; 93 | m_capacity = other.m_capacity; 94 | 95 | other.m_buffer = nullptr; 96 | other.m_capacity = 0; 97 | } 98 | 99 | AssemblerData& operator=(AssemblerData&& other) 100 | { 101 | if (m_buffer && !isInlineBuffer()) 102 | fastFree(m_buffer); 103 | 104 | if (other.isInlineBuffer()) { 105 | ASSERT(other.m_capacity == InlineCapacity); 106 | memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity); 107 | m_buffer = m_inlineBuffer; 108 | } else 109 | m_buffer = other.m_buffer; 110 | m_capacity = other.m_capacity; 111 | 112 | other.m_buffer = nullptr; 113 | other.m_capacity = 0; 114 | return *this; 115 | } 116 | 117 | ~AssemblerData() 118 | { 119 | if (m_buffer && !isInlineBuffer()) 120 | fastFree(m_buffer); 121 | } 122 | 123 | char* buffer() const { return m_buffer; } 124 | 125 | unsigned capacity() const { return m_capacity; } 126 | 127 | void grow(unsigned extraCapacity = 0) 128 | { 129 | m_capacity = m_capacity + m_capacity / 2 + extraCapacity; 130 | if (isInlineBuffer()) { 131 | m_buffer = static_cast(fastMalloc(m_capacity)); 132 | memcpy(m_buffer, m_inlineBuffer, InlineCapacity); 133 | } else 134 | m_buffer = static_cast(fastRealloc(m_buffer, m_capacity)); 135 | } 136 | 137 | private: 138 | bool isInlineBuffer() const { return m_buffer == m_inlineBuffer; } 139 | char* m_buffer; 140 | char m_inlineBuffer[InlineCapacity]; 141 | unsigned m_capacity; 142 | }; 143 | 144 | class AssemblerBuffer { 145 | public: 146 | AssemblerBuffer() 147 | : m_storage() 148 | , m_index(0) 149 | { 150 | } 151 | 152 | bool isAvailable(unsigned space) 153 | { 154 | return m_index + space <= m_storage.capacity(); 155 | } 156 | 157 | void ensureSpace(unsigned space) 158 | { 159 | while (!isAvailable(space)) 160 | outOfLineGrow(); 161 | } 162 | 163 | bool isAligned(int alignment) const 164 | { 165 | return !(m_index & (alignment - 1)); 166 | } 167 | 168 | void putByteUnchecked(int8_t value) { 
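// (Added note: the *Unchecked putters assume the caller has already reserved room via ensureSpace();
// the checked variants below re-verify capacity and grow out of line when needed.)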
putIntegralUnchecked(value); } 169 | void putByte(int8_t value) { putIntegral(value); } 170 | void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); } 171 | void putShort(int16_t value) { putIntegral(value); } 172 | void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); } 173 | void putInt(int32_t value) { putIntegral(value); } 174 | void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); } 175 | void putInt64(int64_t value) { putIntegral(value); } 176 | 177 | void* data() const 178 | { 179 | return m_storage.buffer(); 180 | } 181 | 182 | size_t codeSize() const 183 | { 184 | return m_index; 185 | } 186 | 187 | void setCodeSize(size_t index) 188 | { 189 | // Warning: Only use this if you know exactly what you are doing. 190 | // For example, say you want 40 bytes of nops, it's ok to grow 191 | // and then fill 40 bytes of nops using bigger instructions. 192 | m_index = index; 193 | ASSERT(m_index <= m_storage.capacity()); 194 | } 195 | 196 | AssemblerLabel label() const 197 | { 198 | return AssemblerLabel(m_index); 199 | } 200 | 201 | unsigned debugOffset() { return m_index; } 202 | 203 | AssemblerData&& releaseAssemblerData() { return WTFMove(m_storage); } 204 | 205 | // LocalWriter is a trick to keep the storage buffer and the index 206 | // in memory while issuing multiple Stores. 207 | // It is created in a block scope and its attribute can stay live 208 | // between writes. 209 | // 210 | // LocalWriter *CANNOT* be mixed with other types of access to AssemblerBuffer. 211 | // AssemblerBuffer cannot be used until its LocalWriter goes out of scope. 212 | class LocalWriter { 213 | public: 214 | LocalWriter(AssemblerBuffer& buffer, unsigned requiredSpace) 215 | : m_buffer(buffer) 216 | { 217 | buffer.ensureSpace(requiredSpace); 218 | m_storageBuffer = buffer.m_storage.buffer(); 219 | m_index = buffer.m_index; 220 | #if !defined(NDEBUG) 221 | m_initialIndex = m_index; 222 | m_requiredSpace = requiredSpace; 223 | #endif 224 | } 225 | 226 | ~LocalWriter() 227 | { 228 | ASSERT(m_index - m_initialIndex <= m_requiredSpace); 229 | ASSERT(m_buffer.m_index == m_initialIndex); 230 | ASSERT(m_storageBuffer == m_buffer.m_storage.buffer()); 231 | m_buffer.m_index = m_index; 232 | } 233 | 234 | void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); } 235 | void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); } 236 | void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); } 237 | void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); } 238 | private: 239 | template 240 | void putIntegralUnchecked(IntegralType value) 241 | { 242 | ASSERT(m_index + sizeof(IntegralType) <= m_buffer.m_storage.capacity()); 243 | *reinterpret_cast_ptr(m_storageBuffer + m_index) = value; 244 | m_index += sizeof(IntegralType); 245 | } 246 | AssemblerBuffer& m_buffer; 247 | char* m_storageBuffer; 248 | unsigned m_index; 249 | #if !defined(NDEBUG) 250 | unsigned m_initialIndex; 251 | unsigned m_requiredSpace; 252 | #endif 253 | }; 254 | 255 | protected: 256 | template 257 | void putIntegral(IntegralType value) 258 | { 259 | unsigned nextIndex = m_index + sizeof(IntegralType); 260 | if (UNLIKELY(nextIndex > m_storage.capacity())) 261 | outOfLineGrow(); 262 | ASSERT(isAvailable(sizeof(IntegralType))); 263 | *reinterpret_cast_ptr(m_storage.buffer() + m_index) = value; 264 | m_index = nextIndex; 265 | } 266 | 267 | template 268 | void putIntegralUnchecked(IntegralType value) 269 | { 270 | ASSERT(isAvailable(sizeof(IntegralType))); 271 | 
*reinterpret_cast_ptr(m_storage.buffer() + m_index) = value; 272 | m_index += sizeof(IntegralType); 273 | } 274 | 275 | void append(const char* data, int size) 276 | { 277 | if (!isAvailable(size)) 278 | grow(size); 279 | 280 | memcpy(m_storage.buffer() + m_index, data, size); 281 | m_index += size; 282 | } 283 | 284 | void grow(int extraCapacity = 0) 285 | { 286 | m_storage.grow(extraCapacity); 287 | } 288 | 289 | private: 290 | NEVER_INLINE void outOfLineGrow() 291 | { 292 | m_storage.grow(); 293 | } 294 | 295 | friend LocalWriter; 296 | 297 | AssemblerData m_storage; 298 | unsigned m_index; 299 | }; 300 | 301 | } // namespace JSC 302 | 303 | #endif // ENABLE(ASSEMBLER) 304 | 305 | #endif // AssemblerBuffer_h 306 | -------------------------------------------------------------------------------- /AssemblerBufferWithConstantPool.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2009 University of Szeged 3 | * All rights reserved. 4 | * 5 | * Redistribution and use in source and binary forms, with or without 6 | * modification, are permitted provided that the following conditions 7 | * are met: 8 | * 1. Redistributions of source code must retain the above copyright 9 | * notice, this list of conditions and the following disclaimer. 10 | * 2. Redistributions in binary form must reproduce the above copyright 11 | * notice, this list of conditions and the following disclaimer in the 12 | * documentation and/or other materials provided with the distribution. 13 | * 14 | * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY 15 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 17 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR 18 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 19 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 20 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 22 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | #ifndef AssemblerBufferWithConstantPool_h 28 | #define AssemblerBufferWithConstantPool_h 29 | 30 | #if ENABLE(ASSEMBLER) 31 | 32 | #include "AssemblerBuffer.h" 33 | #include 34 | 35 | #define ASSEMBLER_HAS_CONSTANT_POOL 1 36 | 37 | namespace JSC { 38 | 39 | /* 40 | On a constant pool 4 or 8 bytes data can be stored. The values can be 41 | constants or addresses. The addresses should be 32 or 64 bits. The constants 42 | should be double-precisions float or integer numbers which are hard to be 43 | encoded as few machine instructions. 44 | 45 | TODO: The pool is desinged to handle both 32 and 64 bits values, but 46 | currently only the 4 bytes constants are implemented and tested. 47 | 48 | The AssemblerBuffer can contain multiple constant pools. Each pool is inserted 49 | into the instruction stream - protected by a jump instruction from the 50 | execution flow. 51 | 52 | The flush mechanism is called when no space remain to insert the next instruction 53 | into the pool. 
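    (Concretely, flushIfNoSpaceFor() below compares the remaining pc-relative reach tracked in
    m_maxDistance against the size of the next instruction plus the barrier and one pool slot, and
    dumps the pool early when a pending load could otherwise no longer reach it.)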
Three values are used to determine when the constant pool itself 54 | have to be inserted into the instruction stream (Assembler Buffer): 55 | 56 | - maxPoolSize: size of the constant pool in bytes, this value cannot be 57 | larger than the maximum offset of a PC relative memory load 58 | 59 | - barrierSize: size of jump instruction in bytes which protects the 60 | constant pool from execution 61 | 62 | - maxInstructionSize: maximum length of a machine instruction in bytes 63 | 64 | There are some callbacks which solve the target architecture specific 65 | address handling: 66 | 67 | - TYPE patchConstantPoolLoad(TYPE load, int value): 68 | patch the 'load' instruction with the index of the constant in the 69 | constant pool and return the patched instruction. 70 | 71 | - void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr): 72 | patch the a PC relative load instruction at 'loadAddr' address with the 73 | final relative offset. The offset can be computed with help of 74 | 'constPoolAddr' (the address of the constant pool) and index of the 75 | constant (which is stored previously in the load instruction itself). 76 | 77 | - TYPE placeConstantPoolBarrier(int size): 78 | return with a constant pool barrier instruction which jumps over the 79 | constant pool. 80 | 81 | The 'put*WithConstant*' functions should be used to place a data into the 82 | constant pool. 83 | */ 84 | 85 | template 86 | class AssemblerBufferWithConstantPool : public AssemblerBuffer { 87 | typedef SegmentedVector LoadOffsets; 88 | using AssemblerBuffer::putIntegral; 89 | using AssemblerBuffer::putIntegralUnchecked; 90 | public: 91 | typedef struct { 92 | short high; 93 | short low; 94 | } TwoShorts; 95 | 96 | enum { 97 | UniqueConst, 98 | ReusableConst, 99 | UnusedEntry, 100 | }; 101 | 102 | AssemblerBufferWithConstantPool() 103 | : AssemblerBuffer() 104 | , m_numConsts(0) 105 | , m_maxDistance(maxPoolSize) 106 | , m_lastConstDelta(0) 107 | { 108 | m_pool = static_cast(fastMalloc(maxPoolSize)); 109 | m_mask = static_cast(fastMalloc(maxPoolSize / sizeof(uint32_t))); 110 | } 111 | 112 | ~AssemblerBufferWithConstantPool() 113 | { 114 | fastFree(m_mask); 115 | fastFree(m_pool); 116 | } 117 | 118 | void ensureSpace(int space) 119 | { 120 | flushIfNoSpaceFor(space); 121 | AssemblerBuffer::ensureSpace(space); 122 | } 123 | 124 | void ensureSpace(int insnSpace, int constSpace) 125 | { 126 | flushIfNoSpaceFor(insnSpace, constSpace); 127 | AssemblerBuffer::ensureSpace(insnSpace); 128 | } 129 | 130 | void ensureSpaceForAnyInstruction(int amount = 1) 131 | { 132 | flushIfNoSpaceFor(amount * maxInstructionSize, amount * sizeof(uint64_t)); 133 | } 134 | 135 | bool isAligned(int alignment) 136 | { 137 | flushIfNoSpaceFor(alignment); 138 | return AssemblerBuffer::isAligned(alignment); 139 | } 140 | 141 | void putByteUnchecked(int value) 142 | { 143 | AssemblerBuffer::putByteUnchecked(value); 144 | correctDeltas(1); 145 | } 146 | 147 | void putByte(int value) 148 | { 149 | flushIfNoSpaceFor(1); 150 | AssemblerBuffer::putByte(value); 151 | correctDeltas(1); 152 | } 153 | 154 | void putShortUnchecked(int value) 155 | { 156 | AssemblerBuffer::putShortUnchecked(value); 157 | correctDeltas(2); 158 | } 159 | 160 | void putShort(int value) 161 | { 162 | flushIfNoSpaceFor(2); 163 | AssemblerBuffer::putShort(value); 164 | correctDeltas(2); 165 | } 166 | 167 | void putIntUnchecked(int value) 168 | { 169 | AssemblerBuffer::putIntUnchecked(value); 170 | correctDeltas(4); 171 | } 172 | 173 | void putInt(int value) 174 | { 175 | 
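        // A checked 4-byte write: dump the pool first if one more instruction would push the
        // pending loads out of pc-relative range, then emit and update the distance tracking.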
flushIfNoSpaceFor(4); 176 | AssemblerBuffer::putInt(value); 177 | correctDeltas(4); 178 | } 179 | 180 | void putInt64Unchecked(int64_t value) 181 | { 182 | AssemblerBuffer::putInt64Unchecked(value); 183 | correctDeltas(8); 184 | } 185 | 186 | void putIntegral(TwoShorts value) 187 | { 188 | putIntegral(value.high); 189 | putIntegral(value.low); 190 | } 191 | 192 | void putIntegralUnchecked(TwoShorts value) 193 | { 194 | putIntegralUnchecked(value.high); 195 | putIntegralUnchecked(value.low); 196 | } 197 | 198 | void putShortWithConstantInt(uint16_t insn, uint32_t constant, bool isReusable = false) 199 | { 200 | putIntegralWithConstantInt(insn, constant, isReusable); 201 | } 202 | 203 | void putIntWithConstantInt(uint32_t insn, uint32_t constant, bool isReusable = false) 204 | { 205 | putIntegralWithConstantInt(insn, constant, isReusable); 206 | } 207 | 208 | // This flushing mechanism can be called after any unconditional jumps. 209 | void flushWithoutBarrier(bool isForced = false) 210 | { 211 | // Flush if constant pool is more than 60% full to avoid overuse of this function. 212 | if (isForced || 5 * static_cast(m_numConsts) > 3 * maxPoolSize / sizeof(uint32_t)) 213 | flushConstantPool(false); 214 | } 215 | 216 | uint32_t* poolAddress() 217 | { 218 | return m_pool; 219 | } 220 | 221 | int sizeOfConstantPool() 222 | { 223 | return m_numConsts; 224 | } 225 | 226 | void flushConstantPool(bool useBarrier = true) 227 | { 228 | if (!m_numConsts) 229 | return; 230 | int alignPool = (codeSize() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1); 231 | 232 | if (alignPool) 233 | alignPool = sizeof(uint64_t) - alignPool; 234 | 235 | // Callback to protect the constant pool from execution 236 | if (useBarrier) 237 | putIntegral(AssemblerType::placeConstantPoolBarrier(m_numConsts * sizeof(uint32_t) + alignPool)); 238 | 239 | if (alignPool) { 240 | if (alignPool & 1) 241 | AssemblerBuffer::putByte(AssemblerType::padForAlign8); 242 | if (alignPool & 2) 243 | AssemblerBuffer::putShort(AssemblerType::padForAlign16); 244 | if (alignPool & 4) 245 | AssemblerBuffer::putInt(AssemblerType::padForAlign32); 246 | } 247 | 248 | int constPoolOffset = codeSize(); 249 | append(reinterpret_cast(m_pool), m_numConsts * sizeof(uint32_t)); 250 | 251 | // Patch each PC relative load 252 | for (LoadOffsets::Iterator iter = m_loadOffsets.begin(); iter != m_loadOffsets.end(); ++iter) { 253 | void* loadAddr = reinterpret_cast(data()) + *iter; 254 | AssemblerType::patchConstantPoolLoad(loadAddr, reinterpret_cast(data()) + constPoolOffset); 255 | } 256 | 257 | m_loadOffsets.clear(); 258 | m_numConsts = 0; 259 | } 260 | 261 | private: 262 | void correctDeltas(int insnSize) 263 | { 264 | m_maxDistance -= insnSize; 265 | m_lastConstDelta -= insnSize; 266 | if (m_lastConstDelta < 0) 267 | m_lastConstDelta = 0; 268 | } 269 | 270 | void correctDeltas(int insnSize, int constSize) 271 | { 272 | correctDeltas(insnSize); 273 | 274 | m_maxDistance -= m_lastConstDelta; 275 | m_lastConstDelta = constSize; 276 | } 277 | 278 | template 279 | void putIntegralWithConstantInt(IntegralType insn, uint32_t constant, bool isReusable) 280 | { 281 | if (!m_numConsts) 282 | m_maxDistance = maxPoolSize; 283 | flushIfNoSpaceFor(sizeof(IntegralType), 4); 284 | 285 | m_loadOffsets.append(codeSize()); 286 | if (isReusable) { 287 | for (int i = 0; i < m_numConsts; ++i) { 288 | if (m_mask[i] == ReusableConst && m_pool[i] == constant) { 289 | putIntegral(static_cast(AssemblerType::patchConstantPoolLoad(insn, i))); 290 | 
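                    // Found a matching ReusableConst slot: only the load instruction itself was
                    // emitted, so just its size enters the distance bookkeeping and no new pool
                    // entry is appended.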
correctDeltas(sizeof(IntegralType)); 291 | return; 292 | } 293 | } 294 | } 295 | 296 | m_pool[m_numConsts] = constant; 297 | m_mask[m_numConsts] = static_cast(isReusable ? ReusableConst : UniqueConst); 298 | 299 | putIntegral(static_cast(AssemblerType::patchConstantPoolLoad(insn, m_numConsts))); 300 | ++m_numConsts; 301 | 302 | correctDeltas(sizeof(IntegralType), 4); 303 | } 304 | 305 | void flushIfNoSpaceFor(int nextInsnSize) 306 | { 307 | if (m_numConsts == 0) 308 | return; 309 | int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0; 310 | if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t))) 311 | flushConstantPool(); 312 | } 313 | 314 | void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize) 315 | { 316 | if (m_numConsts == 0) 317 | return; 318 | if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) || 319 | (m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize)) 320 | flushConstantPool(); 321 | } 322 | 323 | uint32_t* m_pool; 324 | char* m_mask; 325 | LoadOffsets m_loadOffsets; 326 | 327 | int m_numConsts; 328 | int m_maxDistance; 329 | int m_lastConstDelta; 330 | }; 331 | 332 | } // namespace JSC 333 | 334 | #endif // ENABLE(ASSEMBLER) 335 | 336 | #endif // AssemblerBufferWithConstantPool_h 337 | -------------------------------------------------------------------------------- /AssemblerCommon.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | */ 25 | 26 | #ifndef AssemblerCommon_h 27 | #define AssemblerCommon_h 28 | 29 | namespace JSC { 30 | 31 | ALWAYS_INLINE bool isIOS() 32 | { 33 | #if PLATFORM(IOS) 34 | return true; 35 | #else 36 | return false; 37 | #endif 38 | } 39 | 40 | ALWAYS_INLINE bool isInt9(int32_t value) 41 | { 42 | return value == ((value << 23) >> 23); 43 | } 44 | 45 | template 46 | ALWAYS_INLINE bool isUInt12(Type value) 47 | { 48 | return !(value & ~static_cast(0xfff)); 49 | } 50 | 51 | template 52 | ALWAYS_INLINE bool isValidScaledUImm12(int32_t offset) 53 | { 54 | int32_t maxPImm = 4095 * (datasize / 8); 55 | if (offset < 0) 56 | return false; 57 | if (offset > maxPImm) 58 | return false; 59 | if (offset & ((datasize / 8) - 1)) 60 | return false; 61 | return true; 62 | } 63 | 64 | ALWAYS_INLINE bool isValidSignedImm9(int32_t value) 65 | { 66 | return isInt9(value); 67 | } 68 | 69 | class ARM64LogicalImmediate { 70 | public: 71 | static ARM64LogicalImmediate create32(uint32_t value) 72 | { 73 | // Check for 0, -1 - these cannot be encoded. 74 | if (!value || !~value) 75 | return InvalidLogicalImmediate; 76 | 77 | // First look for a 32-bit pattern, then for repeating 16-bit 78 | // patterns, 8-bit, 4-bit, and finally 2-bit. 79 | 80 | unsigned hsb, lsb; 81 | bool inverted; 82 | if (findBitRange<32>(value, hsb, lsb, inverted)) 83 | return encodeLogicalImmediate<32>(hsb, lsb, inverted); 84 | 85 | if ((value & 0xffff) != (value >> 16)) 86 | return InvalidLogicalImmediate; 87 | value &= 0xffff; 88 | 89 | if (findBitRange<16>(value, hsb, lsb, inverted)) 90 | return encodeLogicalImmediate<16>(hsb, lsb, inverted); 91 | 92 | if ((value & 0xff) != (value >> 8)) 93 | return InvalidLogicalImmediate; 94 | value &= 0xff; 95 | 96 | if (findBitRange<8>(value, hsb, lsb, inverted)) 97 | return encodeLogicalImmediate<8>(hsb, lsb, inverted); 98 | 99 | if ((value & 0xf) != (value >> 4)) 100 | return InvalidLogicalImmediate; 101 | value &= 0xf; 102 | 103 | if (findBitRange<4>(value, hsb, lsb, inverted)) 104 | return encodeLogicalImmediate<4>(hsb, lsb, inverted); 105 | 106 | if ((value & 0x3) != (value >> 2)) 107 | return InvalidLogicalImmediate; 108 | value &= 0x3; 109 | 110 | if (findBitRange<2>(value, hsb, lsb, inverted)) 111 | return encodeLogicalImmediate<2>(hsb, lsb, inverted); 112 | 113 | return InvalidLogicalImmediate; 114 | } 115 | 116 | static ARM64LogicalImmediate create64(uint64_t value) 117 | { 118 | // Check for 0, -1 - these cannot be encoded. 119 | if (!value || !~value) 120 | return InvalidLogicalImmediate; 121 | 122 | // Look for a contiguous bit range. 123 | unsigned hsb, lsb; 124 | bool inverted; 125 | if (findBitRange<64>(value, hsb, lsb, inverted)) 126 | return encodeLogicalImmediate<64>(hsb, lsb, inverted); 127 | 128 | // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern. 
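        // (For example 0x00ff00ff00ff00ff: the two halves are both 0x00ff00ff, and create32 then
        // reduces that further to the repeating 16-bit pattern 0x00ff before encoding.)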
129 | if (static_cast(value) == static_cast(value >> 32)) 130 | return create32(static_cast(value)); 131 | return InvalidLogicalImmediate; 132 | } 133 | 134 | int value() const 135 | { 136 | ASSERT(isValid()); 137 | return m_value; 138 | } 139 | 140 | bool isValid() const 141 | { 142 | return m_value != InvalidLogicalImmediate; 143 | } 144 | 145 | bool is64bit() const 146 | { 147 | return m_value & (1 << 12); 148 | } 149 | 150 | private: 151 | ARM64LogicalImmediate(int value) 152 | : m_value(value) 153 | { 154 | } 155 | 156 | // Generate a mask with bits in the range hsb..0 set, for example: 157 | // hsb:63 = 0xffffffffffffffff 158 | // hsb:42 = 0x000007ffffffffff 159 | // hsb: 0 = 0x0000000000000001 160 | static uint64_t mask(unsigned hsb) 161 | { 162 | ASSERT(hsb < 64); 163 | return 0xffffffffffffffffull >> (63 - hsb); 164 | } 165 | 166 | template 167 | static void partialHSB(uint64_t& value, unsigned&result) 168 | { 169 | if (value & (0xffffffffffffffffull << N)) { 170 | result += N; 171 | value >>= N; 172 | } 173 | } 174 | 175 | // Find the bit number of the highest bit set in a non-zero value, for example: 176 | // 0x8080808080808080 = hsb:63 177 | // 0x0000000000000001 = hsb: 0 178 | // 0x000007ffffe00000 = hsb:42 179 | static unsigned highestSetBit(uint64_t value) 180 | { 181 | ASSERT(value); 182 | unsigned hsb = 0; 183 | partialHSB<32>(value, hsb); 184 | partialHSB<16>(value, hsb); 185 | partialHSB<8>(value, hsb); 186 | partialHSB<4>(value, hsb); 187 | partialHSB<2>(value, hsb); 188 | partialHSB<1>(value, hsb); 189 | return hsb; 190 | } 191 | 192 | // This function takes a value and a bit width, where value obeys the following constraints: 193 | // * bits outside of the width of the value must be zero. 194 | // * bits within the width of value must neither be all clear or all set. 195 | // The input is inspected to detect values that consist of either two or three contiguous 196 | // ranges of bits. The output range hsb..lsb will describe the second range of the value. 197 | // if the range is set, inverted will be false, and if the range is clear, inverted will 198 | // be true. For example (with width 8): 199 | // 00001111 = hsb:3, lsb:0, inverted:false 200 | // 11110000 = hsb:3, lsb:0, inverted:true 201 | // 00111100 = hsb:5, lsb:2, inverted:false 202 | // 11000011 = hsb:5, lsb:2, inverted:true 203 | template 204 | static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted) 205 | { 206 | ASSERT(value & mask(width - 1)); 207 | ASSERT(value != mask(width - 1)); 208 | ASSERT(!(value & ~mask(width - 1))); 209 | 210 | // Detect cases where the top bit is set; if so, flip all the bits & set invert. 211 | // This halves the number of patterns we need to look for. 212 | const uint64_t msb = 1ull << (width - 1); 213 | if ((inverted = (value & msb))) 214 | value ^= mask(width - 1); 215 | 216 | // Find the highest set bit in value, generate a corresponding mask & flip all 217 | // bits under it. 218 | hsb = highestSetBit(value); 219 | value ^= mask(hsb); 220 | if (!value) { 221 | // If this cleared the value, then the range hsb..0 was all set. 222 | lsb = 0; 223 | return true; 224 | } 225 | 226 | // Try making one more mask, and flipping the bits! 227 | lsb = highestSetBit(value); 228 | value ^= mask(lsb); 229 | if (!value) { 230 | // Success - but lsb actually points to the hsb of a third range - add one 231 | // to get to the lsb of the mid range. 
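            // (Worked example with width 8 and value 00111100: the first flip leaves 00000011, so
            // lsb is found as 1 here; the increment below yields the documented hsb:5, lsb:2.)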
232 | ++lsb; 233 | return true; 234 | } 235 | 236 | return false; 237 | } 238 | 239 | // Encodes the set of immN:immr:imms fields found in a logical immediate. 240 | template 241 | static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted) 242 | { 243 | // Check width is a power of 2! 244 | ASSERT(!(width & (width -1))); 245 | ASSERT(width <= 64 && width >= 2); 246 | ASSERT(hsb >= lsb); 247 | ASSERT(hsb < width); 248 | 249 | int immN = 0; 250 | int imms = 0; 251 | int immr = 0; 252 | 253 | // For 64-bit values this is easy - just set immN to true, and imms just 254 | // contains the bit number of the highest set bit of the set range. For 255 | // values with narrower widths, these are encoded by a leading set of 256 | // one bits, followed by a zero bit, followed by the remaining set of bits 257 | // being the high bit of the range. For a 32-bit immediate there are no 258 | // leading one bits, just a zero followed by a five bit number. For a 259 | // 16-bit immediate there is one one bit, a zero bit, and then a four bit 260 | // bit-position, etc. 261 | if (width == 64) 262 | immN = 1; 263 | else 264 | imms = 63 & ~(width + width - 1); 265 | 266 | if (inverted) { 267 | // if width is 64 & hsb is 62, then we have a value something like: 268 | // 0x80000000ffffffff (in this case with lsb 32). 269 | // The ror should be by 1, imms (effectively set width minus 1) is 270 | // 32. Set width is full width minus cleared width. 271 | immr = (width - 1) - hsb; 272 | imms |= (width - ((hsb - lsb) + 1)) - 1; 273 | } else { 274 | // if width is 64 & hsb is 62, then we have a value something like: 275 | // 0x7fffffff00000000 (in this case with lsb 32). 276 | // The value is effectively rol'ed by lsb, which is equivalent to 277 | // a ror by width - lsb (or 0, in the case where lsb is 0). imms 278 | // is hsb - lsb. 279 | immr = (width - lsb) & (width - 1); 280 | imms |= hsb - lsb; 281 | } 282 | 283 | return immN << 12 | immr << 6 | imms; 284 | } 285 | 286 | static const int InvalidLogicalImmediate = -1; 287 | 288 | int m_value; 289 | }; 290 | 291 | 292 | } // namespace JSC. 293 | 294 | #endif // AssemblerCommon_h 295 | -------------------------------------------------------------------------------- /CodeLocation.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2009 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #ifndef CodeLocation_h 27 | #define CodeLocation_h 28 | 29 | #include "MacroAssemblerCodeRef.h" 30 | 31 | #if ENABLE(ASSEMBLER) 32 | 33 | namespace JSC { 34 | 35 | enum NearCallMode { Regular, Tail }; 36 | 37 | class CodeLocationInstruction; 38 | class CodeLocationLabel; 39 | class CodeLocationJump; 40 | class CodeLocationCall; 41 | class CodeLocationNearCall; 42 | class CodeLocationDataLabelCompact; 43 | class CodeLocationDataLabel32; 44 | class CodeLocationDataLabelPtr; 45 | class CodeLocationConvertibleLoad; 46 | 47 | // The CodeLocation* types are all pretty much do-nothing wrappers around 48 | // CodePtr (or MacroAssemblerCodePtr, to give it its full name). These 49 | // classes only exist to provide type-safety when linking and patching code. 50 | // 51 | // The one new piece of functionallity introduced by these classes is the 52 | // ability to create (or put another way, to re-discover) another CodeLocation 53 | // at an offset from one you already know. When patching code to optimize it 54 | // we often want to patch a number of instructions that are short, fixed 55 | // offsets apart. To reduce memory overhead we will only retain a pointer to 56 | // one of the instructions, and we will use the *AtOffset methods provided by 57 | // CodeLocationCommon to find the other points in the code to modify. 
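// A minimal sketch of that re-discovery pattern, as it might appear in code that includes this
// header (the byte offset is hypothetical; real callers derive it from label arithmetic recorded
// when the code was emitted):
static JSC::CodeLocationJump jumpShortlyAfter(JSC::CodeLocationLabel entry)
{
    // Re-discover a patchable jump that was emitted a known, fixed distance past the retained label.
    return entry.jumpAtOffset(8);
}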
58 | class CodeLocationCommon : public MacroAssemblerCodePtr { 59 | public: 60 | CodeLocationInstruction instructionAtOffset(int offset); 61 | CodeLocationLabel labelAtOffset(int offset); 62 | CodeLocationJump jumpAtOffset(int offset); 63 | CodeLocationCall callAtOffset(int offset); 64 | CodeLocationNearCall nearCallAtOffset(int offset, NearCallMode); 65 | CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset); 66 | CodeLocationDataLabel32 dataLabel32AtOffset(int offset); 67 | CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset); 68 | CodeLocationConvertibleLoad convertibleLoadAtOffset(int offset); 69 | 70 | protected: 71 | CodeLocationCommon() 72 | { 73 | } 74 | 75 | CodeLocationCommon(MacroAssemblerCodePtr location) 76 | : MacroAssemblerCodePtr(location) 77 | { 78 | } 79 | }; 80 | 81 | class CodeLocationInstruction : public CodeLocationCommon { 82 | public: 83 | CodeLocationInstruction() {} 84 | explicit CodeLocationInstruction(MacroAssemblerCodePtr location) 85 | : CodeLocationCommon(location) {} 86 | explicit CodeLocationInstruction(void* location) 87 | : CodeLocationCommon(MacroAssemblerCodePtr(location)) {} 88 | }; 89 | 90 | class CodeLocationLabel : public CodeLocationCommon { 91 | public: 92 | CodeLocationLabel() {} 93 | explicit CodeLocationLabel(MacroAssemblerCodePtr location) 94 | : CodeLocationCommon(location) {} 95 | explicit CodeLocationLabel(void* location) 96 | : CodeLocationCommon(MacroAssemblerCodePtr(location)) {} 97 | }; 98 | 99 | class CodeLocationJump : public CodeLocationCommon { 100 | public: 101 | CodeLocationJump() {} 102 | explicit CodeLocationJump(MacroAssemblerCodePtr location) 103 | : CodeLocationCommon(location) {} 104 | explicit CodeLocationJump(void* location) 105 | : CodeLocationCommon(MacroAssemblerCodePtr(location)) {} 106 | }; 107 | 108 | class CodeLocationCall : public CodeLocationCommon { 109 | public: 110 | CodeLocationCall() {} 111 | explicit CodeLocationCall(MacroAssemblerCodePtr location) 112 | : CodeLocationCommon(location) {} 113 | explicit CodeLocationCall(void* location) 114 | : CodeLocationCommon(MacroAssemblerCodePtr(location)) {} 115 | }; 116 | 117 | class CodeLocationNearCall : public CodeLocationCommon { 118 | public: 119 | CodeLocationNearCall() {} 120 | explicit CodeLocationNearCall(MacroAssemblerCodePtr location, NearCallMode callMode) 121 | : CodeLocationCommon(location), m_callMode(callMode) { } 122 | explicit CodeLocationNearCall(void* location, NearCallMode callMode) 123 | : CodeLocationCommon(MacroAssemblerCodePtr(location)), m_callMode(callMode) { } 124 | NearCallMode callMode() { return m_callMode; } 125 | private: 126 | NearCallMode m_callMode = NearCallMode::Regular; 127 | }; 128 | 129 | class CodeLocationDataLabel32 : public CodeLocationCommon { 130 | public: 131 | CodeLocationDataLabel32() {} 132 | explicit CodeLocationDataLabel32(MacroAssemblerCodePtr location) 133 | : CodeLocationCommon(location) {} 134 | explicit CodeLocationDataLabel32(void* location) 135 | : CodeLocationCommon(MacroAssemblerCodePtr(location)) {} 136 | }; 137 | 138 | class CodeLocationDataLabelCompact : public CodeLocationCommon { 139 | public: 140 | CodeLocationDataLabelCompact() { } 141 | explicit CodeLocationDataLabelCompact(MacroAssemblerCodePtr location) 142 | : CodeLocationCommon(location) { } 143 | explicit CodeLocationDataLabelCompact(void* location) 144 | : CodeLocationCommon(MacroAssemblerCodePtr(location)) { } 145 | }; 146 | 147 | class CodeLocationDataLabelPtr : public CodeLocationCommon { 148 | public: 149 | 
CodeLocationDataLabelPtr() {} 150 | explicit CodeLocationDataLabelPtr(MacroAssemblerCodePtr location) 151 | : CodeLocationCommon(location) {} 152 | explicit CodeLocationDataLabelPtr(void* location) 153 | : CodeLocationCommon(MacroAssemblerCodePtr(location)) {} 154 | }; 155 | 156 | class CodeLocationConvertibleLoad : public CodeLocationCommon { 157 | public: 158 | CodeLocationConvertibleLoad() { } 159 | explicit CodeLocationConvertibleLoad(MacroAssemblerCodePtr location) 160 | : CodeLocationCommon(location) { } 161 | explicit CodeLocationConvertibleLoad(void* location) 162 | : CodeLocationCommon(MacroAssemblerCodePtr(location)) { } 163 | }; 164 | 165 | inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset) 166 | { 167 | ASSERT_VALID_CODE_OFFSET(offset); 168 | return CodeLocationInstruction(reinterpret_cast(dataLocation()) + offset); 169 | } 170 | 171 | inline CodeLocationLabel CodeLocationCommon::labelAtOffset(int offset) 172 | { 173 | ASSERT_VALID_CODE_OFFSET(offset); 174 | return CodeLocationLabel(reinterpret_cast(dataLocation()) + offset); 175 | } 176 | 177 | inline CodeLocationJump CodeLocationCommon::jumpAtOffset(int offset) 178 | { 179 | ASSERT_VALID_CODE_OFFSET(offset); 180 | return CodeLocationJump(reinterpret_cast(dataLocation()) + offset); 181 | } 182 | 183 | inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset) 184 | { 185 | ASSERT_VALID_CODE_OFFSET(offset); 186 | return CodeLocationCall(reinterpret_cast(dataLocation()) + offset); 187 | } 188 | 189 | inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset, NearCallMode callMode) 190 | { 191 | ASSERT_VALID_CODE_OFFSET(offset); 192 | return CodeLocationNearCall(reinterpret_cast(dataLocation()) + offset, callMode); 193 | } 194 | 195 | inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset) 196 | { 197 | ASSERT_VALID_CODE_OFFSET(offset); 198 | return CodeLocationDataLabelPtr(reinterpret_cast(dataLocation()) + offset); 199 | } 200 | 201 | inline CodeLocationDataLabel32 CodeLocationCommon::dataLabel32AtOffset(int offset) 202 | { 203 | ASSERT_VALID_CODE_OFFSET(offset); 204 | return CodeLocationDataLabel32(reinterpret_cast(dataLocation()) + offset); 205 | } 206 | 207 | inline CodeLocationDataLabelCompact CodeLocationCommon::dataLabelCompactAtOffset(int offset) 208 | { 209 | ASSERT_VALID_CODE_OFFSET(offset); 210 | return CodeLocationDataLabelCompact(reinterpret_cast(dataLocation()) + offset); 211 | } 212 | 213 | inline CodeLocationConvertibleLoad CodeLocationCommon::convertibleLoadAtOffset(int offset) 214 | { 215 | ASSERT_VALID_CODE_OFFSET(offset); 216 | return CodeLocationConvertibleLoad(reinterpret_cast(dataLocation()) + offset); 217 | } 218 | 219 | } // namespace JSC 220 | 221 | #endif // ENABLE(ASSEMBLER) 222 | 223 | #endif // CodeLocation_h 224 | -------------------------------------------------------------------------------- /DisallowMacroScratchRegisterUsage.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. 
Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #ifndef DisallowMacroScratchRegisterUsage_h 27 | #define DisallowMacroScratchRegisterUsage_h 28 | 29 | #if ENABLE(ASSEMBLER) 30 | 31 | #include "MacroAssembler.h" 32 | 33 | namespace JSC { 34 | 35 | class DisallowMacroScratchRegisterUsage { 36 | public: 37 | DisallowMacroScratchRegisterUsage(MacroAssembler& masm) 38 | : m_masm(masm) 39 | , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister) 40 | { 41 | masm.m_allowScratchRegister = false; 42 | } 43 | 44 | ~DisallowMacroScratchRegisterUsage() 45 | { 46 | m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister; 47 | } 48 | 49 | private: 50 | MacroAssembler& m_masm; 51 | bool m_oldValueOfAllowScratchRegister; 52 | }; 53 | 54 | } // namespace JSC 55 | 56 | #endif // ENABLE(ASSEMBLER) 57 | 58 | #endif // DisallowMacroScratchRegisterUsage_h 59 | 60 | -------------------------------------------------------------------------------- /ExecutableAllocator.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2008 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | */ 25 | 26 | #include "config.h" 27 | #include "ExecutableAllocator.h" 28 | #include 29 | 30 | //#include "JSCInlines.h" 31 | 32 | #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) 33 | //#include "CodeProfiling.h" 34 | #include 35 | #include 36 | #include 37 | #include 38 | #include 39 | #include 40 | #endif 41 | 42 | // Uncomment to create an artificial executable memory usage limit. This limit 43 | // is imperfect and is primarily useful for testing the VM's ability to handle 44 | // out-of-executable-memory situations. 45 | // #define EXECUTABLE_MEMORY_LIMIT 1000000 46 | 47 | #if ENABLE(ASSEMBLER) 48 | 49 | using namespace WTF; 50 | 51 | namespace JSC { 52 | 53 | #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) 54 | 55 | class DemandExecutableAllocator : public MetaAllocator { 56 | public: 57 | DemandExecutableAllocator() 58 | : MetaAllocator(jitAllocationGranule) 59 | { 60 | std::lock_guard lock(allocatorsMutex()); 61 | allocators().add(this); 62 | // Don't preallocate any memory here. 63 | } 64 | 65 | virtual ~DemandExecutableAllocator() 66 | { 67 | { 68 | std::lock_guard lock(allocatorsMutex()); 69 | allocators().remove(this); 70 | } 71 | for (unsigned i = 0; i < reservations.size(); ++i) 72 | reservations.at(i).deallocate(); 73 | } 74 | 75 | static size_t bytesAllocatedByAllAllocators() 76 | { 77 | size_t total = 0; 78 | std::lock_guard lock(allocatorsMutex()); 79 | for (HashSet::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) 80 | total += (*allocator)->bytesAllocated(); 81 | return total; 82 | } 83 | 84 | static size_t bytesCommittedByAllocactors() 85 | { 86 | size_t total = 0; 87 | std::lock_guard lock(allocatorsMutex()); 88 | for (HashSet::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) 89 | total += (*allocator)->bytesCommitted(); 90 | return total; 91 | } 92 | 93 | #if ENABLE(META_ALLOCATOR_PROFILE) 94 | static void dumpProfileFromAllAllocators() 95 | { 96 | std::lock_guard lock(allocatorsMutex()); 97 | for (HashSet::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) 98 | (*allocator)->dumpProfile(); 99 | } 100 | #endif 101 | 102 | protected: 103 | virtual void* allocateNewSpace(size_t& numPages) 104 | { 105 | size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize(); 106 | 107 | ASSERT(newNumPages >= numPages); 108 | 109 | numPages = newNumPages; 110 | 111 | #ifdef EXECUTABLE_MEMORY_LIMIT 112 | if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT) 113 | return 0; 114 | #endif 115 | 116 | PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); 117 | RELEASE_ASSERT(reservation); 118 | 119 | reservations.append(reservation); 120 | 121 | return reservation.base(); 122 | } 123 | 124 | virtual void notifyNeedPage(void* page) 125 | { 126 | OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true); 127 | } 128 | 129 | virtual void notifyPageIsFree(void* page) 130 | { 131 | OSAllocator::decommit(page, pageSize()); 132 | } 133 | 134 | private: 135 | Vector reservations; 136 | static HashSet& allocators() 137 | { 138 | static NeverDestroyed> set; 139 | return set; 140 | } 141 | 142 | static StaticLock& allocatorsMutex() 143 | { 144 | static StaticLock mutex; 145 | 146 | return mutex; 147 | } 148 | }; 149 | 150 | #if 
ENABLE(ASSEMBLER_WX_EXCLUSIVE) 151 | void ExecutableAllocator::initializeAllocator() 152 | { 153 | } 154 | #else 155 | static DemandExecutableAllocator* gAllocator; 156 | 157 | namespace { 158 | static inline DemandExecutableAllocator* allocator() 159 | { 160 | return gAllocator; 161 | } 162 | } 163 | 164 | void ExecutableAllocator::initializeAllocator() 165 | { 166 | ASSERT(!gAllocator); 167 | gAllocator = new DemandExecutableAllocator(); 168 | /*CodeProfiling::notifyAllocator(gAllocator);*/ 169 | } 170 | #endif 171 | 172 | ExecutableAllocator::ExecutableAllocator(/*VM&*/) 173 | #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) 174 | : m_allocator(std::make_unique()) 175 | #endif 176 | { 177 | ASSERT(allocator()); 178 | } 179 | 180 | ExecutableAllocator::~ExecutableAllocator() 181 | { 182 | } 183 | 184 | bool ExecutableAllocator::isValid() const 185 | { 186 | return true; 187 | } 188 | 189 | bool ExecutableAllocator::underMemoryPressure() 190 | { 191 | #ifdef EXECUTABLE_MEMORY_LIMIT 192 | return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2; 193 | #else 194 | return false; 195 | #endif 196 | } 197 | 198 | double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage) 199 | { 200 | double result; 201 | #ifdef EXECUTABLE_MEMORY_LIMIT 202 | size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage; 203 | if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT) 204 | bytesAllocated = EXECUTABLE_MEMORY_LIMIT; 205 | result = static_cast(EXECUTABLE_MEMORY_LIMIT) / 206 | (EXECUTABLE_MEMORY_LIMIT - bytesAllocated); 207 | #else 208 | UNUSED_PARAM(addedMemoryUsage); 209 | result = 1.0; 210 | #endif 211 | if (result < 1.0) 212 | result = 1.0; 213 | return result; 214 | 215 | } 216 | 217 | RefPtr ExecutableAllocator::allocate(/*VM&,*/ size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) 218 | { 219 | RefPtr result = allocator()->allocate(sizeInBytes, ownerUID); 220 | RELEASE_ASSERT(result || effort != JITCompilationMustSucceed); 221 | return result; 222 | } 223 | 224 | size_t ExecutableAllocator::committedByteCount() 225 | { 226 | return DemandExecutableAllocator::bytesCommittedByAllocactors(); 227 | } 228 | 229 | #if ENABLE(META_ALLOCATOR_PROFILE) 230 | void ExecutableAllocator::dumpProfile() 231 | { 232 | DemandExecutableAllocator::dumpProfileFromAllAllocators(); 233 | } 234 | #endif 235 | 236 | Lock& ExecutableAllocator::getLock() const 237 | { 238 | return gAllocator->getLock(); 239 | } 240 | 241 | bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address) 242 | { 243 | return gAllocator->isInAllocatedMemory(locker, address); 244 | } 245 | 246 | #endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) 247 | 248 | #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) 249 | 250 | #if OS(WINDOWS) 251 | #error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform." 252 | #endif 253 | 254 | void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting) 255 | { 256 | size_t pageSize = WTF::pageSize(); 257 | 258 | // Calculate the start of the page containing this region, 259 | // and account for this extra memory within size. 260 | intptr_t startPtr = reinterpret_cast(start); 261 | intptr_t pageStartPtr = startPtr & ~(pageSize - 1); 262 | void* pageStart = reinterpret_cast(pageStartPtr); 263 | size += (startPtr - pageStartPtr); 264 | 265 | // Round size up 266 | size += (pageSize - 1); 267 | size &= ~(pageSize - 1); 268 | 269 | mprotect(pageStart, size, (setting == Writable) ? 
PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX); 270 | } 271 | 272 | #endif 273 | 274 | } 275 | 276 | #endif // HAVE(ASSEMBLER) 277 | -------------------------------------------------------------------------------- /ExecutableAllocator.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2008 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #ifndef ExecutableAllocator_h 27 | #define ExecutableAllocator_h 28 | #include "JITCompilationEffort.h" 29 | #include // for ptrdiff_t 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | 37 | #if OS(IOS) 38 | #include 39 | #endif 40 | 41 | #if OS(IOS) 42 | #include 43 | #endif 44 | 45 | #if CPU(MIPS) && OS(LINUX) 46 | #include 47 | #endif 48 | 49 | #if CPU(SH4) && OS(LINUX) 50 | #include 51 | #include 52 | #include 53 | #include 54 | #endif 55 | 56 | #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4) 57 | 58 | #define EXECUTABLE_POOL_WRITABLE true 59 | 60 | namespace JSC { 61 | 62 | //class VM; 63 | 64 | static const unsigned jitAllocationGranule = 32; 65 | 66 | typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle; 67 | 68 | #if ENABLE(ASSEMBLER) 69 | 70 | #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) 71 | class DemandExecutableAllocator; 72 | #endif 73 | 74 | #if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) 75 | #if CPU(ARM) 76 | static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024; 77 | #elif CPU(ARM64) 78 | static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024; 79 | #elif CPU(X86_64) 80 | static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024; 81 | #else 82 | static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024; 83 | #endif 84 | #if CPU(ARM) 85 | static const double executablePoolReservationFraction = 0.15; 86 | #else 87 | static const double executablePoolReservationFraction = 0.25; 88 | #endif 89 | 90 | extern JS_EXPORTDATA uintptr_t startOfFixedExecutableMemoryPool; 91 | extern JS_EXPORTDATA uintptr_t endOfFixedExecutableMemoryPool; 92 | 93 | typedef void (*JITWriteFunction)(off_t, const void*, size_t); 94 | extern JS_EXPORTDATA JITWriteFunction jitWriteFunction; 95 | 96 | static inline void* 
performJITMemcpy(void *dst, const void *src, size_t n) 97 | { 98 | // Use execute-only write thunk for writes inside the JIT region. This is a variant of 99 | // memcpy that takes an offset into the JIT region as its destination (first) parameter. 100 | if (jitWriteFunction && (uintptr_t)dst >= startOfFixedExecutableMemoryPool && (uintptr_t)dst <= endOfFixedExecutableMemoryPool) { 101 | off_t offset = (off_t)((uintptr_t)dst - startOfFixedExecutableMemoryPool); 102 | jitWriteFunction(offset, src, n); 103 | return dst; 104 | } 105 | 106 | // Use regular memcpy for writes outside the JIT region. 107 | return memcpy(dst, src, n); 108 | } 109 | 110 | #else // ENABLE(EXECUTABLE_ALLOCATOR_FIXED) 111 | static inline void* performJITMemcpy(void *dst, const void *src, size_t n) 112 | { 113 | return memcpy(dst, src, n); 114 | } 115 | #endif 116 | 117 | class ExecutableAllocator { 118 | enum ProtectionSetting { Writable, Executable }; 119 | 120 | public: 121 | ExecutableAllocator(/*VM&*/); 122 | ~ExecutableAllocator(); 123 | 124 | static void initializeAllocator(); 125 | 126 | bool isValid() const; 127 | 128 | static bool underMemoryPressure(); 129 | 130 | static double memoryPressureMultiplier(size_t addedMemoryUsage); 131 | 132 | #if ENABLE(META_ALLOCATOR_PROFILE) 133 | static void dumpProfile(); 134 | #else 135 | static void dumpProfile() { } 136 | #endif 137 | 138 | RefPtr allocate(/*VM&,*/ size_t sizeInBytes, void* ownerUID, JITCompilationEffort); 139 | 140 | bool isValidExecutableMemory(const LockHolder&, void* address); 141 | 142 | static size_t committedByteCount(); 143 | 144 | Lock& getLock() const; 145 | }; 146 | 147 | #endif // ENABLE(JIT) && ENABLE(ASSEMBLER) 148 | 149 | } // namespace JSC 150 | 151 | #endif // !defined(ExecutableAllocator) 152 | -------------------------------------------------------------------------------- /JITCompilationEffort.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | */ 25 | 26 | #ifndef JITCompilationEffort_h 27 | #define JITCompilationEffort_h 28 | 29 | namespace JSC { 30 | 31 | enum JITCompilationEffort { 32 | JITCompilationCanFail, 33 | JITCompilationMustSucceed 34 | }; 35 | 36 | } // namespace JSC 37 | 38 | #endif // JITCompilationEffort_h 39 | 40 | -------------------------------------------------------------------------------- /LinkBuffer.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2012-2015 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #include "config.h" 27 | #include "LinkBuffer.h" 28 | 29 | #if ENABLE(ASSEMBLER) 30 | 31 | //#include "CodeBlock.h" 32 | //#include "JITCode.h" 33 | //#include "JSCInlines.h" 34 | #include "Options.h" 35 | //#include "disassembler/Disassembler.h" 36 | //#include "VM.h" 37 | //#include 38 | //#include 39 | //#include 40 | 41 | namespace JSC { 42 | 43 | //bool shouldDumpDisassemblyFor(CodeBlock* codeBlock) 44 | //{ 45 | // if (JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::dumpDFGDisassembly()) 46 | // return true; 47 | // return Options::dumpDisassembly(); 48 | //} 49 | 50 | LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly() 51 | { 52 | performFinalization(); 53 | 54 | ASSERT(m_didAllocate); 55 | if (m_executableMemory) 56 | return CodeRef(m_executableMemory); 57 | 58 | return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(m_code)); 59 | } 60 | 61 | LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...) 
62 | { 63 | CodeRef result = finalizeCodeWithoutDisassembly(); 64 | 65 | //if (m_alreadyDisassembled) 66 | // return result; 67 | // 68 | //StringPrintStream out; 69 | //out.printf("Generated JIT code for "); 70 | //va_list argList; 71 | //va_start(argList, format); 72 | //out.vprintf(format, argList); 73 | //va_end(argList); 74 | //out.printf(":\n"); 75 | 76 | //out.printf(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast(result.code().executableAddress()) + result.size()); 77 | // 78 | //CString header = out.toCString(); 79 | // 80 | ///*if (Options::asyncDisassembly()) { 81 | // disassembleAsynchronously(header, result, m_size, " "); 82 | // return result; 83 | //}*/ 84 | // 85 | //dataLog(header); 86 | //disassemble(result.code(), m_size, " ", WTF::dataFile()); 87 | 88 | return result; 89 | } 90 | 91 | #if ENABLE(BRANCH_COMPACTION) 92 | static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset) 93 | { 94 | int32_t ptr = regionStart / sizeof(int32_t); 95 | const int32_t end = regionEnd / sizeof(int32_t); 96 | int32_t* offsets = reinterpret_cast_ptr(assemblerData.buffer()); 97 | while (ptr < end) 98 | offsets[ptr++] = offset; 99 | } 100 | 101 | template 102 | void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort) 103 | { 104 | allocate(macroAssembler, ownerUID, effort); 105 | const size_t initialSize = macroAssembler.m_assembler.codeSize(); 106 | if (didFailToAllocate()) 107 | return; 108 | 109 | Vector& jumpsToLink = macroAssembler.jumpsToLink(); 110 | m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData(); 111 | uint8_t* inData = reinterpret_cast(m_assemblerStorage.buffer()); 112 | 113 | AssemblerData outBuffer(m_size); 114 | 115 | uint8_t* outData = reinterpret_cast(outBuffer.buffer()); 116 | uint8_t* codeOutData = reinterpret_cast(m_code); 117 | 118 | int readPtr = 0; 119 | int writePtr = 0; 120 | unsigned jumpCount = jumpsToLink.size(); 121 | if (m_shouldPerformBranchCompaction) { 122 | for (unsigned i = 0; i < jumpCount; ++i) { 123 | int offset = readPtr - writePtr; 124 | ASSERT(!(offset & 1)); 125 | 126 | // Copy the instructions from the last jump to the current one. 127 | size_t regionSize = jumpsToLink[i].from() - readPtr; 128 | InstructionType* copySource = reinterpret_cast_ptr(inData + readPtr); 129 | InstructionType* copyEnd = reinterpret_cast_ptr(inData + readPtr + regionSize); 130 | InstructionType* copyDst = reinterpret_cast_ptr(outData + writePtr); 131 | ASSERT(!(regionSize % 2)); 132 | ASSERT(!(readPtr % 2)); 133 | ASSERT(!(writePtr % 2)); 134 | while (copySource != copyEnd) 135 | *copyDst++ = *copySource++; 136 | recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset); 137 | readPtr += regionSize; 138 | writePtr += regionSize; 139 | 140 | // Calculate absolute address of the jump target, in the case of backwards 141 | // branches we need to be precise, forward branches we are pessimistic 142 | const uint8_t* target; 143 | if (jumpsToLink[i].to() >= jumpsToLink[i].from()) 144 | target = codeOutData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far 145 | else 146 | target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to()); 147 | 148 | JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], codeOutData + writePtr, target); 149 | // Compact branch if we can... 
150 | if (MacroAssembler::canCompact(jumpsToLink[i].type())) { 151 | // Step back in the write stream 152 | int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType); 153 | if (delta) { 154 | writePtr -= delta; 155 | recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr); 156 | } 157 | } 158 | jumpsToLink[i].setFrom(writePtr); 159 | } 160 | } else { 161 | if (!ASSERT_DISABLED) { 162 | for (unsigned i = 0; i < jumpCount; ++i) 163 | ASSERT(!MacroAssembler::canCompact(jumpsToLink[i].type())); 164 | } 165 | } 166 | // Copy everything after the last jump 167 | memcpy(outData + writePtr, inData + readPtr, initialSize - readPtr); 168 | recordLinkOffsets(m_assemblerStorage, readPtr, initialSize, readPtr - writePtr); 169 | 170 | for (unsigned i = 0; i < jumpCount; ++i) { 171 | uint8_t* location = codeOutData + jumpsToLink[i].from(); 172 | uint8_t* target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to()); 173 | MacroAssembler::link(jumpsToLink[i], outData + jumpsToLink[i].from(), location, target); 174 | } 175 | 176 | jumpsToLink.clear(); 177 | 178 | size_t compactSize = writePtr + initialSize - readPtr; 179 | if (m_executableMemory) { 180 | m_size = compactSize; 181 | m_executableMemory->shrink(m_size); 182 | } else { 183 | size_t nopSizeInBytes = initialSize - compactSize; 184 | bool isCopyingToExecutableMemory = false; 185 | MacroAssembler::AssemblerType_T::fillNops(outData + compactSize, nopSizeInBytes, isCopyingToExecutableMemory); 186 | } 187 | 188 | performJITMemcpy(m_code, outData, m_size); 189 | 190 | #if DUMP_LINK_STATISTICS 191 | dumpLinkStatistics(m_code, initialSize, m_size); 192 | #endif 193 | #if DUMP_CODE 194 | dumpCode(m_code, m_size); 195 | #endif 196 | } 197 | #endif 198 | 199 | 200 | void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort) 201 | { 202 | #if !ENABLE(BRANCH_COMPACTION) 203 | #if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL 204 | macroAssembler.m_assembler.buffer().flushConstantPool(false); 205 | #endif 206 | allocate(macroAssembler, ownerUID, effort); 207 | if (!m_didAllocate) 208 | return; 209 | ASSERT(m_code); 210 | AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer(); 211 | #if CPU(ARM_TRADITIONAL) 212 | macroAssembler.m_assembler.prepareExecutableCopy(m_code); 213 | #endif 214 | performJITMemcpy(m_code, buffer.data(), buffer.codeSize()); 215 | #if CPU(MIPS) 216 | macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code); 217 | #endif 218 | #elif CPU(ARM_THUMB2) 219 | copyCompactAndLinkCode(macroAssembler, ownerUID, effort); 220 | #elif CPU(ARM64) 221 | copyCompactAndLinkCode(macroAssembler, ownerUID, effort); 222 | #endif // !ENABLE(BRANCH_COMPACTION) 223 | 224 | m_linkTasks = WTFMove(macroAssembler.m_linkTasks); 225 | } 226 | 227 | void LinkBuffer::allocate(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort) 228 | { 229 | size_t initialSize = macroAssembler.m_assembler.codeSize(); 230 | if (m_code) { 231 | if (initialSize > m_size) 232 | return; 233 | 234 | size_t nopsToFillInBytes = m_size - initialSize; 235 | macroAssembler.emitNops(nopsToFillInBytes); 236 | m_didAllocate = true; 237 | return; 238 | } 239 | 240 | /*ASSERT(m_vm != nullptr); 241 | m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, initialSize, ownerUID, effort);*/ 242 | ASSERT(m_allocator != nullptr); 243 | m_executableMemory = m_allocator->allocate(initialSize, ownerUID, 
effort); 244 | if (!m_executableMemory) 245 | return; 246 | m_code = m_executableMemory->start(); 247 | m_size = initialSize; 248 | m_didAllocate = true; 249 | } 250 | 251 | void LinkBuffer::performFinalization() 252 | { 253 | for (auto& task : m_linkTasks) 254 | task->run(*this); 255 | 256 | #ifndef NDEBUG 257 | ASSERT(!isCompilationThread()); 258 | ASSERT(!m_completed); 259 | ASSERT(isValid()); 260 | m_completed = true; 261 | #endif 262 | 263 | MacroAssembler::cacheFlush(code(), m_size); 264 | } 265 | 266 | #if DUMP_LINK_STATISTICS 267 | void LinkBuffer::dumpLinkStatistics(void* code, size_t initializeSize, size_t finalSize) 268 | { 269 | static unsigned linkCount = 0; 270 | static unsigned totalInitialSize = 0; 271 | static unsigned totalFinalSize = 0; 272 | linkCount++; 273 | totalInitialSize += initialSize; 274 | totalFinalSize += finalSize; 275 | dataLogF("link %p: orig %u, compact %u (delta %u, %.2f%%)\n", 276 | code, static_cast(initialSize), static_cast(finalSize), 277 | static_cast(initialSize - finalSize), 278 | 100.0 * (initialSize - finalSize) / initialSize); 279 | dataLogF("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n", 280 | linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize, 281 | 100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize); 282 | } 283 | #endif 284 | 285 | #if DUMP_CODE 286 | void LinkBuffer::dumpCode(void* code, size_t size) 287 | { 288 | #if CPU(ARM_THUMB2) 289 | // Dump the generated code in an asm file format that can be assembled and then disassembled 290 | // for debugging purposes. For example, save this output as jit.s: 291 | // gcc -arch armv7 -c jit.s 292 | // otool -tv jit.o 293 | static unsigned codeCount = 0; 294 | unsigned short* tcode = static_cast(code); 295 | size_t tsize = size / sizeof(short); 296 | char nameBuf[128]; 297 | snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++); 298 | dataLogF("\t.syntax unified\n" 299 | "\t.section\t__TEXT,__text,regular,pure_instructions\n" 300 | "\t.globl\t%s\n" 301 | "\t.align 2\n" 302 | "\t.code 16\n" 303 | "\t.thumb_func\t%s\n" 304 | "# %p\n" 305 | "%s:\n", nameBuf, nameBuf, code, nameBuf); 306 | 307 | for (unsigned i = 0; i < tsize; i++) 308 | dataLogF("\t.short\t0x%x\n", tcode[i]); 309 | #elif CPU(ARM_TRADITIONAL) 310 | // gcc -c jit.s 311 | // objdump -D jit.o 312 | static unsigned codeCount = 0; 313 | unsigned int* tcode = static_cast(code); 314 | size_t tsize = size / sizeof(unsigned int); 315 | char nameBuf[128]; 316 | snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++); 317 | dataLogF("\t.globl\t%s\n" 318 | "\t.align 4\n" 319 | "\t.code 32\n" 320 | "\t.text\n" 321 | "# %p\n" 322 | "%s:\n", nameBuf, code, nameBuf); 323 | 324 | for (unsigned i = 0; i < tsize; i++) 325 | dataLogF("\t.long\t0x%x\n", tcode[i]); 326 | #endif 327 | } 328 | #endif 329 | 330 | } // namespace JSC 331 | 332 | #endif // ENABLE(ASSEMBLER) 333 | 334 | 335 | -------------------------------------------------------------------------------- /LinkBuffer.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2009, 2010, 2012-2015 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. 
Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #ifndef LinkBuffer_h 27 | #define LinkBuffer_h 28 | 29 | #if ENABLE(ASSEMBLER) 30 | 31 | #define DUMP_LINK_STATISTICS 0 32 | #define DUMP_CODE 0 33 | 34 | #define GLOBAL_THUNK_ID reinterpret_cast(static_cast(-1)) 35 | #define REGEXP_CODE_ID reinterpret_cast(static_cast(-2)) 36 | #define CSS_CODE_ID reinterpret_cast(static_cast(-3)) 37 | 38 | #include "JITCompilationEffort.h" 39 | #include "MacroAssembler.h" 40 | #include 41 | #include 42 | #include 43 | 44 | namespace JSC { 45 | 46 | //class CodeBlock; 47 | class ExecutableAllocator; 48 | 49 | // LinkBuffer: 50 | // 51 | // This class assists in linking code generated by the macro assembler, once code generation 52 | // has been completed, and the code has been copied to its final location in memory. At this 53 | // time pointers to labels within the code may be resolved, and relative offsets to external 54 | // addresses may be fixed. 55 | // 56 | // Specifically: 57 | // * Jump objects may be linked to external targets, 58 | // * The address of Jump objects may be taken, such that it can later be relinked. 59 | // * The return address of a Call may be acquired. 60 | // * The address of a Label pointing into the code may be resolved. 61 | // * The value referenced by a DataLabel may be set.
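//
// A minimal usage sketch (illustrative only; the allocator, label, and target
// names below are assumptions, not declarations from this header):
//
//     MacroAssembler jit;
//     MacroAssembler::Jump toSlowPath = jit.jump();
//     // ... emit the rest of the code ...
//
//     LinkBuffer linkBuffer(&executableAllocator, jit, GLOBAL_THUNK_ID, JITCompilationCanFail);
//     if (!linkBuffer.isValid())
//         return; // executable memory allocation failed
//     linkBuffer.link(toSlowPath, CodeLocationLabel(slowPathEntryAddress));
//     MacroAssemblerCodeRef code = linkBuffer.finalizeCodeWithoutDisassembly();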
62 | // 63 | class LinkBuffer { 64 | WTF_MAKE_NONCOPYABLE(LinkBuffer); WTF_MAKE_FAST_ALLOCATED; 65 | 66 | typedef MacroAssemblerCodeRef CodeRef; 67 | typedef MacroAssemblerCodePtr CodePtr; 68 | typedef MacroAssembler::Label Label; 69 | typedef MacroAssembler::Jump Jump; 70 | typedef MacroAssembler::PatchableJump PatchableJump; 71 | typedef MacroAssembler::JumpList JumpList; 72 | typedef MacroAssembler::Call Call; 73 | typedef MacroAssembler::DataLabelCompact DataLabelCompact; 74 | typedef MacroAssembler::DataLabel32 DataLabel32; 75 | typedef MacroAssembler::DataLabelPtr DataLabelPtr; 76 | typedef MacroAssembler::ConvertibleLoadLabel ConvertibleLoadLabel; 77 | #if ENABLE(BRANCH_COMPACTION) 78 | typedef MacroAssembler::LinkRecord LinkRecord; 79 | typedef MacroAssembler::JumpLinkType JumpLinkType; 80 | #endif 81 | 82 | public: 83 | LinkBuffer(/*VM& vm,*/ExecutableAllocator *pAllocator, MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed) 84 | : m_size(0) 85 | , m_didAllocate(false) 86 | , m_code(0) 87 | //, m_vm(&vm) 88 | , m_allocator(pAllocator) 89 | #ifndef NDEBUG 90 | , m_completed(false) 91 | #endif 92 | { 93 | linkCode(macroAssembler, ownerUID, effort); 94 | } 95 | 96 | LinkBuffer(MacroAssembler& macroAssembler, void* code, size_t size, JITCompilationEffort effort = JITCompilationMustSucceed, bool shouldPerformBranchCompaction = true) 97 | : m_size(size) 98 | , m_didAllocate(false) 99 | , m_code(code) 100 | //, m_vm(0) 101 | #ifndef NDEBUG 102 | , m_completed(false) 103 | #endif 104 | { 105 | #if ENABLE(BRANCH_COMPACTION) 106 | m_shouldPerformBranchCompaction = shouldPerformBranchCompaction; 107 | #else 108 | UNUSED_PARAM(shouldPerformBranchCompaction); 109 | #endif 110 | linkCode(macroAssembler, 0, effort); 111 | } 112 | 113 | ~LinkBuffer() 114 | { 115 | } 116 | 117 | bool didFailToAllocate() const 118 | { 119 | return !m_didAllocate; 120 | } 121 | 122 | bool isValid() const 123 | { 124 | return !didFailToAllocate(); 125 | } 126 | 127 | // These methods are used to link or set values at code generation time. 128 | 129 | void link(Call call, FunctionPtr function) 130 | { 131 | ASSERT(call.isFlagSet(Call::Linkable)); 132 | call.m_label = applyOffset(call.m_label); 133 | MacroAssembler::linkCall(code(), call, function); 134 | } 135 | 136 | void link(Call call, CodeLocationLabel label) 137 | { 138 | link(call, FunctionPtr(label.executableAddress())); 139 | } 140 | 141 | void link(Jump jump, CodeLocationLabel label) 142 | { 143 | jump.m_label = applyOffset(jump.m_label); 144 | MacroAssembler::linkJump(code(), jump, label); 145 | } 146 | 147 | void link(const JumpList& list, CodeLocationLabel label) 148 | { 149 | for (const Jump& jump : list.jumps()) 150 | link(jump, label); 151 | } 152 | 153 | void patch(DataLabelPtr label, void* value) 154 | { 155 | AssemblerLabel target = applyOffset(label.m_label); 156 | MacroAssembler::linkPointer(code(), target, value); 157 | } 158 | 159 | void patch(DataLabelPtr label, CodeLocationLabel value) 160 | { 161 | AssemblerLabel target = applyOffset(label.m_label); 162 | MacroAssembler::linkPointer(code(), target, value.executableAddress()); 163 | } 164 | 165 | // These methods are used to obtain handles to allow the code to be relinked / repatched later. 
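//
// For example (an illustrative sketch; it assumes a DataLabelPtr recorded during
// code generation and the repatching helpers declared in AbstractMacroAssembler.h):
//
//     CodeLocationDataLabelPtr cachedPointer = linkBuffer.locationOf(pointerLabel);
//     // ... later, once the code is live and the cached value must change ...
//     MacroAssembler::repatchPointer(cachedPointer, newValue);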
166 | 167 | CodeLocationLabel entrypoint() 168 | { 169 | return CodeLocationLabel(code()); 170 | } 171 | 172 | CodeLocationCall locationOf(Call call) 173 | { 174 | ASSERT(call.isFlagSet(Call::Linkable)); 175 | ASSERT(!call.isFlagSet(Call::Near)); 176 | return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label))); 177 | } 178 | 179 | CodeLocationNearCall locationOfNearCall(Call call) 180 | { 181 | ASSERT(call.isFlagSet(Call::Linkable)); 182 | ASSERT(call.isFlagSet(Call::Near)); 183 | return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)), 184 | call.isFlagSet(Call::Tail) ? NearCallMode::Tail : NearCallMode::Regular); 185 | } 186 | 187 | CodeLocationLabel locationOf(PatchableJump jump) 188 | { 189 | return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(jump.m_jump.m_label))); 190 | } 191 | 192 | CodeLocationLabel locationOf(Label label) 193 | { 194 | return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); 195 | } 196 | 197 | CodeLocationDataLabelPtr locationOf(DataLabelPtr label) 198 | { 199 | return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); 200 | } 201 | 202 | CodeLocationDataLabel32 locationOf(DataLabel32 label) 203 | { 204 | return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); 205 | } 206 | 207 | CodeLocationDataLabelCompact locationOf(DataLabelCompact label) 208 | { 209 | return CodeLocationDataLabelCompact(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); 210 | } 211 | 212 | CodeLocationConvertibleLoad locationOf(ConvertibleLoadLabel label) 213 | { 214 | return CodeLocationConvertibleLoad(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); 215 | } 216 | 217 | // This method obtains the return address of the call, given as an offset from 218 | // the start of the code. 219 | unsigned returnAddressOffset(Call call) 220 | { 221 | call.m_label = applyOffset(call.m_label); 222 | return MacroAssembler::getLinkerCallReturnOffset(call); 223 | } 224 | 225 | uint32_t offsetOf(Label label) 226 | { 227 | return applyOffset(label.m_label).m_offset; 228 | } 229 | 230 | unsigned offsetOf(PatchableJump jump) 231 | { 232 | return applyOffset(jump.m_jump.m_label).m_offset; 233 | } 234 | 235 | // Upon completion of all patching 'FINALIZE_CODE()' should be called once to 236 | // complete generation of the code. Alternatively, call 237 | // finalizeCodeWithoutDisassembly() directly if you have your own way of 238 | // displaying disassembly. 239 | 240 | JS_EXPORT_PRIVATE CodeRef finalizeCodeWithoutDisassembly(); 241 | JS_EXPORT_PRIVATE CodeRef finalizeCodeWithDisassembly(const char* format, ...) 
WTF_ATTRIBUTE_PRINTF(2, 3); 242 | 243 | CodePtr trampolineAt(Label label) 244 | { 245 | return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label))); 246 | } 247 | 248 | void* debugAddress() 249 | { 250 | return m_code; 251 | } 252 | 253 | size_t size() const { return m_size; } 254 | 255 | bool wasAlreadyDisassembled() const { return m_alreadyDisassembled; } 256 | void didAlreadyDisassemble() { m_alreadyDisassembled = true; } 257 | 258 | //VM& vm() { return *m_vm; } 259 | 260 | private: 261 | #if ENABLE(BRANCH_COMPACTION) 262 | int executableOffsetFor(int location) 263 | { 264 | if (!location) 265 | return 0; 266 | return bitwise_cast(m_assemblerStorage.buffer())[location / sizeof(int32_t) - 1]; 267 | } 268 | #endif 269 | 270 | template T applyOffset(T src) 271 | { 272 | #if ENABLE(BRANCH_COMPACTION) 273 | src.m_offset -= executableOffsetFor(src.m_offset); 274 | #endif 275 | return src; 276 | } 277 | 278 | // Keep this private! - the underlying code should only be obtained externally via finalizeCode(). 279 | void* code() 280 | { 281 | return m_code; 282 | } 283 | 284 | void allocate(MacroAssembler&, void* ownerUID, JITCompilationEffort); 285 | 286 | JS_EXPORT_PRIVATE void linkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort); 287 | #if ENABLE(BRANCH_COMPACTION) 288 | template 289 | void copyCompactAndLinkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort); 290 | #endif 291 | 292 | void performFinalization(); 293 | 294 | #if DUMP_LINK_STATISTICS 295 | static void dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize); 296 | #endif 297 | 298 | #if DUMP_CODE 299 | static void dumpCode(void* code, size_t); 300 | #endif 301 | 302 | RefPtr m_executableMemory; 303 | size_t m_size; 304 | #if ENABLE(BRANCH_COMPACTION) 305 | AssemblerData m_assemblerStorage; 306 | bool m_shouldPerformBranchCompaction { true }; 307 | #endif 308 | bool m_didAllocate; 309 | void* m_code; 310 | //VM* m_vm; 311 | ExecutableAllocator *m_allocator; 312 | #ifndef NDEBUG 313 | bool m_completed; 314 | #endif 315 | bool m_alreadyDisassembled { false }; 316 | Vector>> m_linkTasks; 317 | }; 318 | 319 | #define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading) \ 320 | (UNLIKELY((condition)) \ 321 | ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \ 322 | : (linkBufferReference).finalizeCodeWithoutDisassembly()) 323 | 324 | //bool shouldDumpDisassemblyFor(CodeBlock*); 325 | 326 | #define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, dataLogFArgumentsForHeading) (linkBufferReference).finalizeCodeWithoutDisassembly() 327 | //#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, dataLogFArgumentsForHeading) \ 328 | // FINALIZE_CODE_IF(shouldDumpDisassemblyFor(codeBlock) || Options::asyncDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) 329 | 330 | // Use this to finalize code, like so: 331 | // 332 | // CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number)); 333 | // 334 | // Which, in disassembly mode, will print: 335 | // 336 | // Generated JIT code for my super thingy number 42: 337 | // Code at [0x123456, 0x234567]: 338 | // 0x123456: mov $0, 0 339 | // 0x12345a: ret 340 | // 341 | // ... and so on. 342 | // 343 | // Note that the dataLogFArgumentsForHeading are only evaluated when dumpDisassembly 344 | // is true, so you can hide expensive disassembly-only computations inside there. 
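//
// Implementation note: the heading arguments carry their own parentheses,
// e.g. ("my super thingy number %d", number), because FINALIZE_CODE_IF above
// pastes dataLogFArgumentsForHeading directly after finalizeCodeWithDisassembly,
// so the whole parenthesized list becomes that call's printf-style argument list.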
345 | 346 | #define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading) \ 347 | FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) 348 | 349 | #define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading) \ 350 | FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpDFGDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) 351 | 352 | } // namespace JSC 353 | 354 | #endif // ENABLE(ASSEMBLER) 355 | 356 | #endif // LinkBuffer_h 357 | -------------------------------------------------------------------------------- /MacroAssembler.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2012 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | */ 25 | 26 | #include "config.h" 27 | #include "MacroAssembler.h" 28 | 29 | #if ENABLE(ASSEMBLER) 30 | 31 | #include 32 | 33 | namespace JSC { 34 | 35 | const double MacroAssembler::twoToThe32 = (double)0x100000000ull; 36 | 37 | #if ENABLE(MASM_PROBE) 38 | static void stdFunctionCallback(MacroAssembler::ProbeContext* context) 39 | { 40 | auto func = static_cast*>(context->arg1); 41 | (*func)(context); 42 | } 43 | 44 | void MacroAssembler::probe(std::function func) 45 | { 46 | probe(stdFunctionCallback, new std::function(func), 0); 47 | } 48 | #endif // ENABLE(MASM_PROBE) 49 | 50 | } // namespace JSC 51 | 52 | namespace WTF { 53 | 54 | using namespace JSC; 55 | 56 | void printInternal(PrintStream& out, MacroAssembler::RelationalCondition cond) 57 | { 58 | switch (cond) { 59 | case MacroAssembler::Equal: 60 | out.print("Equal"); 61 | return; 62 | case MacroAssembler::NotEqual: 63 | out.print("NotEqual"); 64 | return; 65 | case MacroAssembler::Above: 66 | out.print("Above"); 67 | return; 68 | case MacroAssembler::AboveOrEqual: 69 | out.print("AboveOrEqual"); 70 | return; 71 | case MacroAssembler::Below: 72 | out.print("Below"); 73 | return; 74 | case MacroAssembler::BelowOrEqual: 75 | out.print("BelowOrEqual"); 76 | return; 77 | case MacroAssembler::GreaterThan: 78 | out.print("GreaterThan"); 79 | return; 80 | case MacroAssembler::GreaterThanOrEqual: 81 | out.print("GreaterThanOrEqual"); 82 | return; 83 | case MacroAssembler::LessThan: 84 | out.print("LessThan"); 85 | return; 86 | case MacroAssembler::LessThanOrEqual: 87 | out.print("LessThanOrEqual"); 88 | return; 89 | } 90 | RELEASE_ASSERT_NOT_REACHED(); 91 | } 92 | 93 | void printInternal(PrintStream& out, MacroAssembler::ResultCondition cond) 94 | { 95 | switch (cond) { 96 | case MacroAssembler::Overflow: 97 | out.print("Overflow"); 98 | return; 99 | case MacroAssembler::Signed: 100 | out.print("Signed"); 101 | return; 102 | case MacroAssembler::PositiveOrZero: 103 | out.print("PositiveOrZero"); 104 | return; 105 | case MacroAssembler::Zero: 106 | out.print("Zero"); 107 | return; 108 | case MacroAssembler::NonZero: 109 | out.print("NonZero"); 110 | return; 111 | } 112 | RELEASE_ASSERT_NOT_REACHED(); 113 | } 114 | 115 | void printInternal(PrintStream& out, MacroAssembler::DoubleCondition cond) 116 | { 117 | switch (cond) { 118 | case MacroAssembler::DoubleEqual: 119 | out.print("DoubleEqual"); 120 | return; 121 | case MacroAssembler::DoubleNotEqual: 122 | out.print("DoubleNotEqual"); 123 | return; 124 | case MacroAssembler::DoubleGreaterThan: 125 | out.print("DoubleGreaterThan"); 126 | return; 127 | case MacroAssembler::DoubleGreaterThanOrEqual: 128 | out.print("DoubleGreaterThanOrEqual"); 129 | return; 130 | case MacroAssembler::DoubleLessThan: 131 | out.print("DoubleLessThan"); 132 | return; 133 | case MacroAssembler::DoubleLessThanOrEqual: 134 | out.print("DoubleLessThanOrEqual"); 135 | return; 136 | case MacroAssembler::DoubleEqualOrUnordered: 137 | out.print("DoubleEqualOrUnordered"); 138 | return; 139 | case MacroAssembler::DoubleNotEqualOrUnordered: 140 | out.print("DoubleNotEqualOrUnordered"); 141 | return; 142 | case MacroAssembler::DoubleGreaterThanOrUnordered: 143 | out.print("DoubleGreaterThanOrUnordered"); 144 | return; 145 | case MacroAssembler::DoubleGreaterThanOrEqualOrUnordered: 146 | out.print("DoubleGreaterThanOrEqualOrUnordered"); 147 | return; 148 | case MacroAssembler::DoubleLessThanOrUnordered: 149 | out.print("DoubleLessThanOrUnordered"); 150 | return; 151 | case 
MacroAssembler::DoubleLessThanOrEqualOrUnordered: 152 | out.print("DoubleLessThanOrEqualOrUnordered"); 153 | return; 154 | } 155 | 156 | RELEASE_ASSERT_NOT_REACHED(); 157 | } 158 | 159 | } // namespace WTF 160 | 161 | #endif // ENABLE(ASSEMBLER) 162 | 163 | -------------------------------------------------------------------------------- /MacroAssemblerARM.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013-2015 Apple Inc. 3 | * Copyright (C) 2009 University of Szeged 4 | * All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions 8 | * are met: 9 | * 1. Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * 2. Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in the 13 | * documentation and/or other materials provided with the distribution. 14 | * 15 | * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY 16 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 18 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR 19 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 20 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 21 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 22 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 23 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | */ 27 | 28 | #include "config.h" 29 | 30 | #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 31 | 32 | #include "MacroAssemblerARM.h" 33 | 34 | #include 35 | 36 | #if OS(LINUX) 37 | #include 38 | #include 39 | #include 40 | #include 41 | #include 42 | #include 43 | #endif 44 | 45 | namespace JSC { 46 | 47 | static bool isVFPPresent() 48 | { 49 | #if OS(LINUX) 50 | int fd = open("/proc/self/auxv", O_RDONLY); 51 | if (fd != -1) { 52 | Elf32_auxv_t aux; 53 | while (read(fd, &aux, sizeof(Elf32_auxv_t))) { 54 | if (aux.a_type == AT_HWCAP) { 55 | close(fd); 56 | return aux.a_un.a_val & HWCAP_VFP; 57 | } 58 | } 59 | close(fd); 60 | } 61 | #endif // OS(LINUX) 62 | 63 | #if (COMPILER(GCC_OR_CLANG) && defined(__VFP_FP__)) 64 | return true; 65 | #else 66 | return false; 67 | #endif 68 | } 69 | 70 | const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent(); 71 | 72 | #if CPU(ARMV5_OR_LOWER) 73 | /* On ARMv5 and below, natural alignment is required. 
*/ 74 | void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) 75 | { 76 | ARMWord op2; 77 | 78 | ASSERT(address.scale >= 0 && address.scale <= 3); 79 | op2 = m_assembler.lsl(address.index, static_cast(address.scale)); 80 | 81 | if (address.offset >= 0 && address.offset + 0x2 <= 0xff) { 82 | m_assembler.add(ARMRegisters::S0, address.base, op2); 83 | m_assembler.halfDtrUp(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset)); 84 | m_assembler.halfDtrUp(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset + 0x2)); 85 | } else if (address.offset < 0 && address.offset >= -0xff) { 86 | m_assembler.add(ARMRegisters::S0, address.base, op2); 87 | m_assembler.halfDtrDown(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset)); 88 | m_assembler.halfDtrDown(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset - 0x2)); 89 | } else { 90 | m_assembler.moveImm(address.offset, ARMRegisters::S0); 91 | m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, op2); 92 | m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, dest, address.base, ARMRegisters::S0); 93 | m_assembler.add(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::Op2Immediate | 0x2); 94 | m_assembler.halfDtrUpRegister(ARMAssembler::LoadUint16, ARMRegisters::S0, address.base, ARMRegisters::S0); 95 | } 96 | m_assembler.orr(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16)); 97 | } 98 | #endif // CPU(ARMV5_OR_LOWER) 99 | 100 | #if ENABLE(MASM_PROBE) 101 | 102 | extern "C" void ctiMasmProbeTrampoline(); 103 | 104 | #if COMPILER(GCC_OR_CLANG) 105 | 106 | // The following are offsets for MacroAssemblerARM::ProbeContext fields accessed 107 | // by the ctiMasmProbeTrampoline stub. 
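//
// Worked example of the layout defined below (the values follow directly from
// the #defines, with PTR_SIZE == GPREG_SIZE == 4 and FPREG_SIZE == 8):
//
//     PROBE_FIRST_GPREG_OFFSET = 4 * 4         = 16
//     PROBE_CPU_SP_OFFSET      = 16 + 13 * 4   = 68
//     PROBE_FIRST_FPREG_OFFSET = 16 + 18 * 4   = 88  (a multiple of 8, keeping the 64-bit d-register slots 8-byte aligned)
//     PROBE_SIZE               = 88 + 16 * 8   = 216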
108 | 109 | #define PTR_SIZE 4 110 | #define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) 111 | #define PROBE_ARG1_OFFSET (1 * PTR_SIZE) 112 | #define PROBE_ARG2_OFFSET (2 * PTR_SIZE) 113 | 114 | #define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE) 115 | 116 | #define GPREG_SIZE 4 117 | #define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) 118 | #define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE)) 119 | #define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE)) 120 | #define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE)) 121 | #define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE)) 122 | #define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE)) 123 | #define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE)) 124 | #define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE)) 125 | #define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE)) 126 | #define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE)) 127 | #define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE)) 128 | #define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE)) 129 | #define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE)) 130 | #define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE)) 131 | #define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE)) 132 | #define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE)) 133 | 134 | #define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE)) 135 | #define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE)) 136 | 137 | #define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE)) 138 | 139 | #define FPREG_SIZE 8 140 | #define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE)) 141 | #define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE)) 142 | #define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE)) 143 | #define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE)) 144 | #define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE)) 145 | #define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE)) 146 | #define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE)) 147 | #define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE)) 148 | #define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE)) 149 | #define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE)) 150 | #define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE)) 151 | #define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE)) 152 | #define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE)) 153 | #define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) 154 | #define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) 155 | #define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) 156 | 157 | #define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) 158 | 159 | // These ASSERTs remind you that if you change the layout of ProbeContext, 160 | // you need to change ctiMasmProbeTrampoline offsets above to match. 
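//
// They are compile-time checks: COMPILE_ASSERT is WTF's static assertion, so any
// mismatch between offsetof(ProbeContext, field) and the hand-maintained offset
// constants fails the build instead of silently corrupting the register dump at
// runtime.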
161 | #define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARM::ProbeContext, x) 162 | COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); 163 | COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); 164 | COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); 165 | 166 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline); 167 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline); 168 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline); 169 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline); 170 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline); 171 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline); 172 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline); 173 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline); 174 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline); 175 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline); 176 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline); 177 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline); 178 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline); 179 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline); 180 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline); 181 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline); 182 | 183 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline); 184 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline); 185 | 186 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline); 187 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline); 188 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline); 189 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline); 190 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline); 191 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, 
ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline); 192 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline); 193 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline); 194 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline); 195 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline); 196 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline); 197 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline); 198 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline); 199 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline); 200 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline); 201 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline); 202 | COMPILE_ASSERT(sizeof(MacroAssemblerARM::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); 203 | #undef PROBE_OFFSETOF 204 | 205 | asm ( 206 | ".text" "\n" 207 | ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" 208 | HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" 209 | INLINE_ARM_FUNCTION(ctiMasmProbeTrampoline) "\n" 210 | SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" 211 | 212 | // MacroAssemblerARM::probe() has already generated code to store some values. 213 | // The top of stack now looks like this: 214 | // esp[0 * ptrSize]: probeFunction 215 | // esp[1 * ptrSize]: arg1 216 | // esp[2 * ptrSize]: arg2 217 | // esp[3 * ptrSize]: saved r3 / S0 218 | // esp[4 * ptrSize]: saved ip 219 | // esp[5 * ptrSize]: saved lr 220 | // esp[6 * ptrSize]: saved sp 221 | 222 | "mov ip, sp" "\n" 223 | "mov r3, sp" "\n" 224 | "sub r3, r3, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n" 225 | 226 | // The ARM EABI specifies that the stack needs to be 16 byte aligned. 
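// (Editor's note, not in the original: the "bic r3, r3, #0xf" below clears the
// low four bits of the tentative frame base, rounding it down to a 16-byte
// boundary before it is installed as the sp that holds the ProbeContext.)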
227 | "bic r3, r3, #0xf" "\n" 228 | "mov sp, r3" "\n" 229 | 230 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 231 | "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "\n" 232 | "stmia lr, { r0-r11 }" "\n" 233 | "mrs lr, APSR" "\n" 234 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" 235 | "vmrs lr, FPSCR" "\n" 236 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" 237 | 238 | "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 239 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" 240 | "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 241 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n" 242 | "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 243 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n" 244 | "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 245 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R3_OFFSET) "]" "\n" 246 | "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 247 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" 248 | "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 249 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" 250 | "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 251 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 252 | 253 | "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 254 | 255 | "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n" 256 | "vstmia.64 ip, { d0-d15 }" "\n" 257 | 258 | "mov fp, sp" "\n" // Save the ProbeContext*. 259 | 260 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" 261 | "mov r0, sp" "\n" // the ProbeContext* arg. 262 | "blx ip" "\n" 263 | 264 | "mov sp, fp" "\n" 265 | 266 | // To enable probes to modify register state, we copy all registers 267 | // out of the ProbeContext before returning. 268 | 269 | "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n" 270 | "vldmdb.64 ip!, { d0-d15 }" "\n" 271 | "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n" 272 | "ldmdb ip, { r0-r11 }" "\n" 273 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" 274 | "vmsr FPSCR, ip" "\n" 275 | 276 | // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr. 277 | // There are 2 issues that complicate the restoration of these last few 278 | // registers: 279 | // 280 | // 1. Normal ARM calling convention relies on moving lr to pc to return to 281 | // the caller. In our case, the address to return to is specified by 282 | // ProbeContext.cpu.pc. And at that moment, we won't have any available 283 | // scratch registers to hold the return address (lr needs to hold 284 | // ProbeContext.cpu.lr, not the return address). 285 | // 286 | // The solution is to store the return address on the stack and load the 287 | // pc from there. 288 | // 289 | // 2. Issue 1 means we will need to write to the stack location at 290 | // ProbeContext.cpu.sp - 4. But if the user probe function had modified 291 | // the value of ProbeContext.cpu.sp to point in the range between 292 | // &ProbeContext.cpu.ip thru &ProbeContext.cpu.aspr, then the action for 293 | // Issue 1 may trash the values to be restored before we can restore 294 | // them. 295 | // 296 | // The solution is to check if ProbeContext.cpu.sp contains a value in 297 | // the undesirable range. 
If so, we copy the remaining ProbeContext 298 | // register data to a safe range (at memory lower than where 299 | // ProbeContext.cpu.sp points) first, and restore the remaining register 300 | // from this new range. 301 | 302 | "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n" 303 | "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 304 | "cmp lr, ip" "\n" 305 | "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" 306 | 307 | // We get here because the new expected stack pointer location is lower 308 | // than where it's supposed to be. This means the safe range of stack 309 | // memory where we'll be copying the remaining register restore values to 310 | // might be in a region of memory below the sp i.e. unallocated stack 311 | // memory. This in turn makes it vulnerable to interrupts potentially 312 | // trashing the copied values. To prevent that, we must first allocate the 313 | // needed stack memory by adjusting the sp before the copying. 314 | 315 | "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE) 316 | " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n" 317 | 318 | "mov ip, sp" "\n" 319 | "mov sp, lr" "\n" 320 | "mov lr, ip" "\n" 321 | 322 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" 323 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" 324 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 325 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 326 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" 327 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" 328 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 329 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 330 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" 331 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" 332 | 333 | SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" 334 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 335 | "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 336 | "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n" 337 | "str ip, [lr]" "\n" 338 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 339 | 340 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" 341 | "msr APSR, ip" "\n" 342 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" 343 | "mov lr, ip" "\n" 344 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" 345 | "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 346 | 347 | "pop { pc }" "\n" 348 | ); 349 | #endif // COMPILER(GCC_OR_CLANG) 350 | 351 | void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* arg1, void* arg2) 352 | { 353 | push(RegisterID::sp); 354 | push(RegisterID::lr); 355 | push(RegisterID::ip); 356 | push(RegisterID::S0); 357 | // The following uses RegisterID::S0. So, they must come after we push S0 above. 
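// (Editor's note, not in the original: the three pushes below are emitted in
// reverse order so that, on entry to ctiMasmProbeTrampoline, probeFunction,
// arg1 and arg2 sit at sp[0], sp[1] and sp[2] -- the layout described in the
// trampoline's stack comment above.)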
358 | push(trustedImm32FromPtr(arg2)); 359 | push(trustedImm32FromPtr(arg1)); 360 | push(trustedImm32FromPtr(function)); 361 | 362 | move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::S0); 363 | m_assembler.blx(RegisterID::S0); 364 | 365 | } 366 | #endif // ENABLE(MASM_PROBE) 367 | 368 | } // namespace JSC 369 | 370 | #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 371 | -------------------------------------------------------------------------------- /MacroAssemblerARMv7.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013-2015 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #include "config.h" 27 | 28 | #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) 29 | #include "MacroAssemblerARMv7.h" 30 | 31 | #include 32 | 33 | namespace JSC { 34 | 35 | #if ENABLE(MASM_PROBE) 36 | 37 | extern "C" void ctiMasmProbeTrampoline(); 38 | 39 | #if COMPILER(GCC_OR_CLANG) 40 | 41 | // The following are offsets for MacroAssemblerARMv7::ProbeContext fields accessed 42 | // by the ctiMasmProbeTrampoline stub. 
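// Editor's note (illustrative sketch, not part of the original file): each
// PROBE_*_OFFSET below must expand to a plain integer expression, because the
// trampoline splices it into its inline-asm strings with STRINGIZE_VALUE_OF.
// A minimal stand-alone analogue of that expand-then-stringize trick, with
// hypothetical macro names and kept inside "#if 0" so it is never compiled:
#if 0
#define EXAMPLE_STRINGIZE(x) #x
#define EXAMPLE_STRINGIZE_VALUE_OF(x) EXAMPLE_STRINGIZE(x) // expand x first, then stringize it
#define EXAMPLE_OFFSET (3 * 4)

// String-literal concatenation yields "ldr r0, [sp, #(3 * 4)]".
static const char* exampleInstruction =
    "ldr r0, [sp, #" EXAMPLE_STRINGIZE_VALUE_OF(EXAMPLE_OFFSET) "]";
#endif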
43 | 44 | #define PTR_SIZE 4 45 | #define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) 46 | #define PROBE_ARG1_OFFSET (1 * PTR_SIZE) 47 | #define PROBE_ARG2_OFFSET (2 * PTR_SIZE) 48 | 49 | #define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE) 50 | 51 | #define GPREG_SIZE 4 52 | #define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) 53 | #define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE)) 54 | #define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE)) 55 | #define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE)) 56 | #define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE)) 57 | #define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE)) 58 | #define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE)) 59 | #define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE)) 60 | #define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE)) 61 | #define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE)) 62 | #define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE)) 63 | #define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE)) 64 | #define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE)) 65 | #define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE)) 66 | #define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE)) 67 | #define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE)) 68 | 69 | #define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE)) 70 | #define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE)) 71 | 72 | #define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE)) 73 | 74 | #define FPREG_SIZE 8 75 | #define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE)) 76 | #define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE)) 77 | #define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE)) 78 | #define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE)) 79 | #define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE)) 80 | #define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE)) 81 | #define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE)) 82 | #define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE)) 83 | #define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE)) 84 | #define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE)) 85 | #define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE)) 86 | #define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE)) 87 | #define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE)) 88 | #define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) 89 | #define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) 90 | #define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) 91 | #define PROBE_CPU_D16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) 92 | #define PROBE_CPU_D17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE)) 93 | #define PROBE_CPU_D18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE)) 94 | #define PROBE_CPU_D19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE)) 95 | #define PROBE_CPU_D20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE)) 96 | #define PROBE_CPU_D21_OFFSET 
(PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE)) 97 | #define PROBE_CPU_D22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE)) 98 | #define PROBE_CPU_D23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE)) 99 | #define PROBE_CPU_D24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE)) 100 | #define PROBE_CPU_D25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE)) 101 | #define PROBE_CPU_D26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE)) 102 | #define PROBE_CPU_D27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE)) 103 | #define PROBE_CPU_D28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE)) 104 | #define PROBE_CPU_D29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE)) 105 | #define PROBE_CPU_D30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE)) 106 | #define PROBE_CPU_D31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE)) 107 | #define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE)) 108 | 109 | // These ASSERTs remind you that if you change the layout of ProbeContext, 110 | // you need to change ctiMasmProbeTrampoline offsets above to match. 111 | #define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARMv7::ProbeContext, x) 112 | COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); 113 | COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); 114 | COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); 115 | 116 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline); 117 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline); 118 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline); 119 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline); 120 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline); 121 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline); 122 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline); 123 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline); 124 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline); 125 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline); 126 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline); 127 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline); 128 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline); 129 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline); 130 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline); 131 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) 
== PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline); 132 | 133 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline); 134 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline); 135 | 136 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline); 137 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline); 138 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline); 139 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline); 140 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline); 141 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline); 142 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline); 143 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline); 144 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline); 145 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline); 146 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline); 147 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline); 148 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline); 149 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline); 150 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline); 151 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline); 152 | 153 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d16) == PROBE_CPU_D16_OFFSET, ProbeContext_cpu_d16_offset_matches_ctiMasmProbeTrampoline); 154 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d17) == PROBE_CPU_D17_OFFSET, ProbeContext_cpu_d17_offset_matches_ctiMasmProbeTrampoline); 155 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d18) == PROBE_CPU_D18_OFFSET, ProbeContext_cpu_d18_offset_matches_ctiMasmProbeTrampoline); 156 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d19) == PROBE_CPU_D19_OFFSET, ProbeContext_cpu_d19_offset_matches_ctiMasmProbeTrampoline); 157 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d20) == PROBE_CPU_D20_OFFSET, ProbeContext_cpu_d20_offset_matches_ctiMasmProbeTrampoline); 158 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d21) == PROBE_CPU_D21_OFFSET, ProbeContext_cpu_d21_offset_matches_ctiMasmProbeTrampoline); 159 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d22) == PROBE_CPU_D22_OFFSET, ProbeContext_cpu_d22_offset_matches_ctiMasmProbeTrampoline); 160 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d23) == PROBE_CPU_D23_OFFSET, ProbeContext_cpu_d23_offset_matches_ctiMasmProbeTrampoline); 161 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d24) == 
PROBE_CPU_D24_OFFSET, ProbeContext_cpu_d24_offset_matches_ctiMasmProbeTrampoline); 162 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d25) == PROBE_CPU_D25_OFFSET, ProbeContext_cpu_d25_offset_matches_ctiMasmProbeTrampoline); 163 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d26) == PROBE_CPU_D26_OFFSET, ProbeContext_cpu_d26_offset_matches_ctiMasmProbeTrampoline); 164 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d27) == PROBE_CPU_D27_OFFSET, ProbeContext_cpu_d27_offset_matches_ctiMasmProbeTrampoline); 165 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d28) == PROBE_CPU_D28_OFFSET, ProbeContext_cpu_d28_offset_matches_ctiMasmProbeTrampoline); 166 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d29) == PROBE_CPU_D29_OFFSET, ProbeContext_cpu_d29_offset_matches_ctiMasmProbeTrampoline); 167 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d30) == PROBE_CPU_D30_OFFSET, ProbeContext_cpu_d30_offset_matches_ctiMasmProbeTrampoline); 168 | COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d31) == PROBE_CPU_D31_OFFSET, ProbeContext_cpu_d31_offset_matches_ctiMasmProbeTrampoline); 169 | 170 | COMPILE_ASSERT(sizeof(MacroAssemblerARMv7::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); 171 | 172 | #undef PROBE_OFFSETOF 173 | 174 | asm ( 175 | ".text" "\n" 176 | ".align 2" "\n" 177 | ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" 178 | HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" 179 | ".thumb" "\n" 180 | ".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampoline) "\n" 181 | SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" 182 | 183 | // MacroAssemblerARMv7::probe() has already generated code to store some values. 184 | // The top of stack now looks like this: 185 | // esp[0 * ptrSize]: probeFunction 186 | // esp[1 * ptrSize]: arg1 187 | // esp[2 * ptrSize]: arg2 188 | // esp[3 * ptrSize]: saved r0 189 | // esp[4 * ptrSize]: saved ip 190 | // esp[5 * ptrSize]: saved lr 191 | // esp[6 * ptrSize]: saved sp 192 | 193 | "mov ip, sp" "\n" 194 | "mov r0, sp" "\n" 195 | "sub r0, r0, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n" 196 | 197 | // The ARM EABI specifies that the stack needs to be 16 byte aligned. 
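// (Editor's note, not in the original: as in the ARM-traditional trampoline,
// the "bic r0, r0, #0xf" below clears the low four bits of the tentative frame
// base, rounding it down to a 16-byte boundary before it becomes the sp that
// holds the ProbeContext.)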
198 | "bic r0, r0, #0xf" "\n" 199 | "mov sp, r0" "\n" 200 | 201 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 202 | "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R1_OFFSET) "\n" 203 | "stmia lr, { r1-r11 }" "\n" 204 | "mrs lr, APSR" "\n" 205 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" 206 | "vmrs lr, FPSCR" "\n" 207 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" 208 | 209 | "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 210 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" 211 | "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 212 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n" 213 | "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 214 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n" 215 | "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 216 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "]" "\n" 217 | "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 218 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" 219 | "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 220 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" 221 | "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" 222 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 223 | 224 | "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 225 | 226 | "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n" 227 | "vstmia.64 ip!, { d0-d15 }" "\n" 228 | "vstmia.64 ip!, { d16-d31 }" "\n" 229 | 230 | "mov fp, sp" "\n" // Save the ProbeContext*. 231 | 232 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" 233 | "mov r0, sp" "\n" // the ProbeContext* arg. 234 | "blx ip" "\n" 235 | 236 | "mov sp, fp" "\n" 237 | 238 | // To enable probes to modify register state, we copy all registers 239 | // out of the ProbeContext before returning. 240 | 241 | "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D31_OFFSET + FPREG_SIZE) "\n" 242 | "vldmdb.64 ip!, { d16-d31 }" "\n" 243 | "vldmdb.64 ip!, { d0-d15 }" "\n" 244 | 245 | "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n" 246 | "ldmdb ip, { r0-r11 }" "\n" 247 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" 248 | "vmsr FPSCR, ip" "\n" 249 | 250 | // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr. 251 | // There are 2 issues that complicate the restoration of these last few 252 | // registers: 253 | // 254 | // 1. Normal ARM calling convention relies on moving lr to pc to return to 255 | // the caller. In our case, the address to return to is specified by 256 | // ProbeContext.cpu.pc. And at that moment, we won't have any available 257 | // scratch registers to hold the return address (lr needs to hold 258 | // ProbeContext.cpu.lr, not the return address). 259 | // 260 | // The solution is to store the return address on the stack and load the 261 | // pc from there. 262 | // 263 | // 2. Issue 1 means we will need to write to the stack location at 264 | // ProbeContext.cpu.sp - 4. But if the user probe function had modified 265 | // the value of ProbeContext.cpu.sp to point in the range between 266 | // &ProbeContext.cpu.ip thru &ProbeContext.cpu.aspr, then the action for 267 | // Issue 1 may trash the values to be restored before we can restore 268 | // them. 
269 | // 270 | // The solution is to check if ProbeContext.cpu.sp contains a value in 271 | // the undesirable range. If so, we copy the remaining ProbeContext 272 | // register data to a safe range (at memory lower than where 273 | // ProbeContext.cpu.sp points) first, and restore the remaining register 274 | // from this new range. 275 | 276 | "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n" 277 | "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 278 | "cmp lr, ip" "\n" 279 | "it gt" "\n" 280 | "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" 281 | 282 | // We get here because the new expected stack pointer location is lower 283 | // than where it's supposed to be. This means the safe range of stack 284 | // memory where we'll be copying the remaining register restore values to 285 | // might be in a region of memory below the sp i.e. unallocated stack 286 | // memory. This, in turn, makes it vulnerable to interrupts potentially 287 | // trashing the copied values. To prevent that, we must first allocate the 288 | // needed stack memory by adjusting the sp before the copying. 289 | 290 | "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE) 291 | " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n" 292 | 293 | "mov ip, sp" "\n" 294 | "mov sp, lr" "\n" 295 | "mov lr, ip" "\n" 296 | 297 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" 298 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" 299 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 300 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 301 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" 302 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" 303 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 304 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 305 | "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" 306 | "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" 307 | 308 | ".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampolineEnd) "\n" 309 | SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" 310 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" 311 | "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 312 | "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n" 313 | "str ip, [lr]" "\n" 314 | "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 315 | 316 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" 317 | "msr APSR, ip" "\n" 318 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" 319 | "mov lr, ip" "\n" 320 | "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" 321 | "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" 322 | 323 | "pop { pc }" "\n" 324 | ); 325 | #endif // COMPILER(GCC_OR_CLANG) 326 | 327 | void MacroAssemblerARMv7::probe(MacroAssemblerARMv7::ProbeFunction function, void* arg1, void* arg2) 328 | { 329 | push(RegisterID::lr); 330 | push(RegisterID::lr); 331 | add32(TrustedImm32(8), RegisterID::sp, RegisterID::lr); 332 | store32(RegisterID::lr, ArmAddress(RegisterID::sp, 4)); 333 | push(RegisterID::ip); 334 | push(RegisterID::r0); 335 | // The following uses RegisterID::ip. So, they must come after we push ip above. 
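// (Editor's note, not in the original: as in the ARM-traditional version, the
// three pushes below place probeFunction, arg1 and arg2 at sp[0], sp[1] and
// sp[2] on entry to ctiMasmProbeTrampoline, matching the stack layout the
// trampoline documents above.)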
336 | push(trustedImm32FromPtr(arg2)); 337 | push(trustedImm32FromPtr(arg1)); 338 | push(trustedImm32FromPtr(function)); 339 | 340 | move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::ip); 341 | m_assembler.blx(RegisterID::ip); 342 | } 343 | #endif // ENABLE(MASM_PROBE) 344 | 345 | } // namespace JSC 346 | 347 | #endif // ENABLE(ASSEMBLER) 348 | 349 | -------------------------------------------------------------------------------- /MacroAssemblerCodeRef.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2016 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | */ 25 | 26 | #include "config.h" 27 | #include "MacroAssemblerCodeRef.h" 28 | //#include "disassembler/Disassembler.h" 29 | //#include 30 | 31 | //#include "JSCInlines.h" 32 | //#include "LLIntData.h" 33 | 34 | namespace JSC { 35 | 36 | MacroAssemblerCodePtr MacroAssemblerCodePtr::createLLIntCodePtr(OpcodeID codeId) 37 | { 38 | return createFromExecutableAddress(NULL/*LLInt::getCodePtr(codeId)*/); 39 | } 40 | 41 | void MacroAssemblerCodePtr::dumpWithName(const char* name, PrintStream& out) const 42 | { 43 | if (!m_value) { 44 | out.print(name, "(null)"); 45 | return; 46 | } 47 | if (executableAddress() == dataLocation()) { 48 | out.print(name, "(", RawPointer(executableAddress()), ")"); 49 | return; 50 | } 51 | out.print(name, "(executable = ", RawPointer(executableAddress()), ", dataLocation = ", RawPointer(dataLocation()), ")"); 52 | } 53 | 54 | void MacroAssemblerCodePtr::dump(PrintStream& out) const 55 | { 56 | dumpWithName("CodePtr", out); 57 | } 58 | 59 | MacroAssemblerCodeRef MacroAssemblerCodeRef::createLLIntCodeRef(OpcodeID codeId) 60 | { 61 | return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(NULL/*LLInt::getCodePtr(codeId)*/)); 62 | } 63 | 64 | //bool MacroAssemblerCodeRef::tryToDisassemble(PrintStream& out, const char* prefix) const 65 | //{ 66 | // return JSC::tryToDisassemble(m_codePtr, size(), prefix, out); 67 | //} 68 | // 69 | //bool MacroAssemblerCodeRef::tryToDisassemble(const char* prefix) const 70 | //{ 71 | // return tryToDisassemble(WTF::dataFile(), prefix); 72 | //} 73 | // 74 | //CString MacroAssemblerCodeRef::disassembly() const 75 | //{ 76 | // StringPrintStream out; 77 | // if (!tryToDisassemble(out, "")) 78 | // return CString(); 79 | // return out.toCString(); 80 | //} 81 | 82 | void MacroAssemblerCodeRef::dump(PrintStream& out) const 83 | { 84 | m_codePtr.dumpWithName("CodeRef", out); 85 | } 86 | 87 | } // namespace JSC 88 | 89 | -------------------------------------------------------------------------------- /MacroAssemblerCodeRef.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2009, 2012, 2016 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | */ 25 | 26 | #ifndef MacroAssemblerCodeRef_h 27 | #define MacroAssemblerCodeRef_h 28 | 29 | #include "ExecutableAllocator.h" 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | 36 | // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid 37 | // instruction address on the platform (for example, check any alignment requirements). 38 | #if CPU(ARM_THUMB2) && ENABLE(JIT) 39 | // ARM instructions must be 16-bit aligned. Thumb2 code pointers to be loaded into 40 | // into the processor are decorated with the bottom bit set, while traditional ARM has 41 | // the lower bit clear. Since we don't know what kind of pointer, we check for both 42 | // decorated and undecorated null. 43 | #define ASSERT_VALID_CODE_POINTER(ptr) \ 44 | ASSERT(reinterpret_cast(ptr) & ~1) 45 | #define ASSERT_VALID_CODE_OFFSET(offset) \ 46 | ASSERT(!(offset & 1)) // Must be multiple of 2. 47 | #else 48 | #define ASSERT_VALID_CODE_POINTER(ptr) \ 49 | ASSERT(ptr) 50 | #define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes! 51 | #endif 52 | 53 | namespace JSC { 54 | 55 | enum OpcodeID : unsigned; 56 | 57 | // FunctionPtr: 58 | // 59 | // FunctionPtr should be used to wrap pointers to C/C++ functions in JSC 60 | // (particularly, the stub functions). 61 | class FunctionPtr { 62 | public: 63 | FunctionPtr() 64 | : m_value(0) 65 | { 66 | } 67 | 68 | template 69 | FunctionPtr(returnType(*value)()) 70 | : m_value((void*)value) 71 | { 72 | ASSERT_VALID_CODE_POINTER(m_value); 73 | } 74 | 75 | template 76 | FunctionPtr(returnType(*value)(argType1)) 77 | : m_value((void*)value) 78 | { 79 | ASSERT_VALID_CODE_POINTER(m_value); 80 | } 81 | 82 | template 83 | FunctionPtr(returnType(*value)(argType1, argType2)) 84 | : m_value((void*)value) 85 | { 86 | ASSERT_VALID_CODE_POINTER(m_value); 87 | } 88 | 89 | template 90 | FunctionPtr(returnType(*value)(argType1, argType2, argType3)) 91 | : m_value((void*)value) 92 | { 93 | ASSERT_VALID_CODE_POINTER(m_value); 94 | } 95 | 96 | template 97 | FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4)) 98 | : m_value((void*)value) 99 | { 100 | ASSERT_VALID_CODE_POINTER(m_value); 101 | } 102 | 103 | template 104 | FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5)) 105 | : m_value((void*)value) 106 | { 107 | ASSERT_VALID_CODE_POINTER(m_value); 108 | } 109 | 110 | template 111 | FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6)) 112 | : m_value((void*)value) 113 | { 114 | ASSERT_VALID_CODE_POINTER(m_value); 115 | } 116 | // MSVC doesn't seem to treat functions with different calling conventions as 117 | // different types; these methods already defined for fastcall, below. 
118 | #if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS) 119 | 120 | template 121 | FunctionPtr(returnType (CDECL *value)()) 122 | : m_value((void*)value) 123 | { 124 | ASSERT_VALID_CODE_POINTER(m_value); 125 | } 126 | 127 | template 128 | FunctionPtr(returnType (CDECL *value)(argType1)) 129 | : m_value((void*)value) 130 | { 131 | ASSERT_VALID_CODE_POINTER(m_value); 132 | } 133 | 134 | template 135 | FunctionPtr(returnType (CDECL *value)(argType1, argType2)) 136 | : m_value((void*)value) 137 | { 138 | ASSERT_VALID_CODE_POINTER(m_value); 139 | } 140 | 141 | template 142 | FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3)) 143 | : m_value((void*)value) 144 | { 145 | ASSERT_VALID_CODE_POINTER(m_value); 146 | } 147 | 148 | template 149 | FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3, argType4)) 150 | : m_value((void*)value) 151 | { 152 | ASSERT_VALID_CODE_POINTER(m_value); 153 | } 154 | #endif 155 | 156 | #if COMPILER_SUPPORTS(FASTCALL_CALLING_CONVENTION) 157 | 158 | template 159 | FunctionPtr(returnType (FASTCALL *value)()) 160 | : m_value((void*)value) 161 | { 162 | ASSERT_VALID_CODE_POINTER(m_value); 163 | } 164 | 165 | template 166 | FunctionPtr(returnType (FASTCALL *value)(argType1)) 167 | : m_value((void*)value) 168 | { 169 | ASSERT_VALID_CODE_POINTER(m_value); 170 | } 171 | 172 | template 173 | FunctionPtr(returnType (FASTCALL *value)(argType1, argType2)) 174 | : m_value((void*)value) 175 | { 176 | ASSERT_VALID_CODE_POINTER(m_value); 177 | } 178 | 179 | template 180 | FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3)) 181 | : m_value((void*)value) 182 | { 183 | ASSERT_VALID_CODE_POINTER(m_value); 184 | } 185 | 186 | template 187 | FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3, argType4)) 188 | : m_value((void*)value) 189 | { 190 | ASSERT_VALID_CODE_POINTER(m_value); 191 | } 192 | #endif 193 | 194 | template 195 | explicit FunctionPtr(FunctionType* value) 196 | // Using a C-ctyle cast here to avoid compiler error on RVTC: 197 | // Error: #694: reinterpret_cast cannot cast away const or other type qualifiers 198 | // (I guess on RVTC function pointers have a different constness to GCC/MSVC?) 199 | : m_value((void*)value) 200 | { 201 | ASSERT_VALID_CODE_POINTER(m_value); 202 | } 203 | 204 | void* value() const { return m_value; } 205 | void* executableAddress() const { return m_value; } 206 | 207 | 208 | private: 209 | void* m_value; 210 | }; 211 | 212 | // ReturnAddressPtr: 213 | // 214 | // ReturnAddressPtr should be used to wrap return addresses generated by processor 215 | // 'call' instructions exectued in JIT code. We use return addresses to look up 216 | // exception and optimization information, and to repatch the call instruction 217 | // that is the source of the return address. 218 | class ReturnAddressPtr { 219 | public: 220 | ReturnAddressPtr() 221 | : m_value(0) 222 | { 223 | } 224 | 225 | explicit ReturnAddressPtr(void* value) 226 | : m_value(value) 227 | { 228 | ASSERT_VALID_CODE_POINTER(m_value); 229 | } 230 | 231 | explicit ReturnAddressPtr(FunctionPtr function) 232 | : m_value(function.value()) 233 | { 234 | ASSERT_VALID_CODE_POINTER(m_value); 235 | } 236 | 237 | void* value() const { return m_value; } 238 | 239 | void dump(PrintStream& out) const 240 | { 241 | out.print(RawPointer(m_value)); 242 | } 243 | 244 | private: 245 | void* m_value; 246 | }; 247 | 248 | // MacroAssemblerCodePtr: 249 | // 250 | // MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code. 
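// Editor's note (illustrative sketch, not part of the original file): on ARM
// Thumb-2 the class below stores a "decorated" pointer -- the constructor adds
// 1 so bit 0 is set and the address can be branched to in Thumb state, and
// dataLocation() subtracts 1 again before the pointer is used as a plain data
// address. A minimal stand-alone analogue using bit operations (equivalent for
// 2-byte-aligned code), with hypothetical names and kept inside "#if 0" so it
// is never compiled:
#if 0
#include <cstdint>

static void* exampleDecorateThumbCodePtr(void* codeStart) // address to branch to
{
    return reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(codeStart) | 1u);
}

static void* exampleDataLocation(void* decorated) // address to read or patch
{
    return reinterpret_cast<void*>(reinterpret_cast<std::uintptr_t>(decorated) & ~std::uintptr_t(1));
}
#endif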
251 | class MacroAssemblerCodePtr { 252 | public: 253 | MacroAssemblerCodePtr() 254 | : m_value(0) 255 | { 256 | } 257 | 258 | explicit MacroAssemblerCodePtr(void* value) 259 | #if CPU(ARM_THUMB2) 260 | // Decorate the pointer as a thumb code pointer. 261 | : m_value(reinterpret_cast(value) + 1) 262 | #else 263 | : m_value(value) 264 | #endif 265 | { 266 | ASSERT_VALID_CODE_POINTER(m_value); 267 | } 268 | 269 | static MacroAssemblerCodePtr createFromExecutableAddress(void* value) 270 | { 271 | ASSERT_VALID_CODE_POINTER(value); 272 | MacroAssemblerCodePtr result; 273 | result.m_value = value; 274 | return result; 275 | } 276 | 277 | static MacroAssemblerCodePtr createLLIntCodePtr(OpcodeID codeId); 278 | 279 | explicit MacroAssemblerCodePtr(ReturnAddressPtr ra) 280 | : m_value(ra.value()) 281 | { 282 | ASSERT_VALID_CODE_POINTER(m_value); 283 | } 284 | 285 | void* executableAddress() const { return m_value; } 286 | #if CPU(ARM_THUMB2) 287 | // To use this pointer as a data address remove the decoration. 288 | void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast(m_value) - 1; } 289 | #else 290 | void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; } 291 | #endif 292 | 293 | explicit operator bool() const { return m_value; } 294 | 295 | bool operator==(const MacroAssemblerCodePtr& other) const 296 | { 297 | return m_value == other.m_value; 298 | } 299 | 300 | void dumpWithName(const char* name, PrintStream& out) const; 301 | 302 | void dump(PrintStream& out) const; 303 | 304 | enum EmptyValueTag { EmptyValue }; 305 | enum DeletedValueTag { DeletedValue }; 306 | 307 | MacroAssemblerCodePtr(EmptyValueTag) 308 | : m_value(emptyValue()) 309 | { 310 | } 311 | 312 | MacroAssemblerCodePtr(DeletedValueTag) 313 | : m_value(deletedValue()) 314 | { 315 | } 316 | 317 | bool isEmptyValue() const { return m_value == emptyValue(); } 318 | bool isDeletedValue() const { return m_value == deletedValue(); } 319 | 320 | unsigned hash() const { return PtrHash::hash(m_value); } 321 | 322 | private: 323 | static void* emptyValue() { return bitwise_cast(static_cast(1)); } 324 | static void* deletedValue() { return bitwise_cast(static_cast(2)); } 325 | 326 | void* m_value; 327 | }; 328 | 329 | struct MacroAssemblerCodePtrHash { 330 | static unsigned hash(const MacroAssemblerCodePtr& ptr) { return ptr.hash(); } 331 | static bool equal(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b) 332 | { 333 | return a == b; 334 | } 335 | static const bool safeToCompareToEmptyOrDeleted = true; 336 | }; 337 | 338 | // MacroAssemblerCodeRef: 339 | // 340 | // A reference to a section of JIT generated code. A CodeRef consists of a 341 | // pointer to the code, and a ref pointer to the pool from within which it 342 | // was allocated. 343 | class MacroAssemblerCodeRef { 344 | private: 345 | // This is private because it's dangerous enough that we want uses of it 346 | // to be easy to find - hence the static create method below. 
347 | explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr) 348 | : m_codePtr(codePtr) 349 | { 350 | ASSERT(m_codePtr); 351 | } 352 | 353 | public: 354 | MacroAssemblerCodeRef() 355 | { 356 | } 357 | 358 | MacroAssemblerCodeRef(PassRefPtr executableMemory) 359 | : m_codePtr(executableMemory->start()) 360 | , m_executableMemory(executableMemory) 361 | { 362 | ASSERT(m_executableMemory->isManaged()); 363 | ASSERT(m_executableMemory->start()); 364 | ASSERT(m_codePtr); 365 | } 366 | 367 | // Use this only when you know that the codePtr refers to code that is 368 | // already being kept alive through some other means. Typically this means 369 | // that codePtr is immortal. 370 | static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr) 371 | { 372 | return MacroAssemblerCodeRef(codePtr); 373 | } 374 | 375 | // Helper for creating self-managed code refs from LLInt. 376 | static MacroAssemblerCodeRef createLLIntCodeRef(OpcodeID codeId); 377 | 378 | ExecutableMemoryHandle* executableMemory() const 379 | { 380 | return m_executableMemory.get(); 381 | } 382 | 383 | MacroAssemblerCodePtr code() const 384 | { 385 | return m_codePtr; 386 | } 387 | 388 | size_t size() const 389 | { 390 | if (!m_executableMemory) 391 | return 0; 392 | return m_executableMemory->sizeInBytes(); 393 | } 394 | 395 | bool tryToDisassemble(PrintStream& out, const char* prefix = "") const; 396 | 397 | bool tryToDisassemble(const char* prefix = "") const; 398 | 399 | JS_EXPORT_PRIVATE CString disassembly() const; 400 | 401 | explicit operator bool() const { return !!m_codePtr; } 402 | 403 | void dump(PrintStream& out) const; 404 | 405 | private: 406 | MacroAssemblerCodePtr m_codePtr; 407 | RefPtr m_executableMemory; 408 | }; 409 | 410 | } // namespace JSC 411 | 412 | namespace WTF { 413 | 414 | template struct DefaultHash; 415 | template<> struct DefaultHash { 416 | typedef JSC::MacroAssemblerCodePtrHash Hash; 417 | }; 418 | 419 | template struct HashTraits; 420 | template<> struct HashTraits : public CustomHashTraits { }; 421 | 422 | } // namespace WTF 423 | 424 | #endif // MacroAssemblerCodeRef_h 425 | -------------------------------------------------------------------------------- /MacroAssemblerPrinter.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #include "config.h" 27 | #include "MacroAssemblerPrinter.h" 28 | 29 | #if ENABLE(MASM_PROBE) 30 | 31 | #include "MacroAssembler.h" 32 | 33 | namespace JSC { 34 | 35 | using CPUState = MacroAssembler::CPUState; 36 | using ProbeContext = MacroAssembler::ProbeContext; 37 | using RegisterID = MacroAssembler::RegisterID; 38 | using FPRegisterID = MacroAssembler::FPRegisterID; 39 | 40 | static void printIndent(int indentation) 41 | { 42 | for (; indentation > 0; indentation--) 43 | dataLog(" "); 44 | } 45 | 46 | #define INDENT printIndent(indentation) 47 | 48 | void printCPU(CPUState& cpu, int indentation) 49 | { 50 | INDENT, dataLog("cpu: {\n"); 51 | printCPURegisters(cpu, indentation + 1); 52 | INDENT, dataLog("}\n"); 53 | } 54 | 55 | void printCPURegisters(CPUState& cpu, int indentation) 56 | { 57 | #if USE(JSVALUE32_64) 58 | #define INTPTR_HEX_VALUE_FORMAT "0x%08lx" 59 | #else 60 | #define INTPTR_HEX_VALUE_FORMAT "0x%016lx" 61 | #endif 62 | 63 | #define PRINT_GPREGISTER(_type, _regName) { \ 64 | intptr_t value = reinterpret_cast(cpu._regName); \ 65 | INDENT, dataLogF("%6s: " INTPTR_HEX_VALUE_FORMAT " %ld\n", #_regName, value, value) ; \ 66 | } 67 | FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER) 68 | FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER) 69 | #undef PRINT_GPREGISTER 70 | #undef INTPTR_HEX_VALUE_FORMAT 71 | 72 | #define PRINT_FPREGISTER(_type, _regName) { \ 73 | uint64_t* u = reinterpret_cast(&cpu._regName); \ 74 | double* d = reinterpret_cast(&cpu._regName); \ 75 | INDENT, dataLogF("%6s: 0x%016llx %.13g\n", #_regName, *u, *d); \ 76 | } 77 | FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER) 78 | #undef PRINT_FPREGISTER 79 | } 80 | 81 | static void printPC(CPUState& cpu) 82 | { 83 | union { 84 | void* voidPtr; 85 | intptr_t intptrValue; 86 | } u; 87 | #if CPU(X86) || CPU(X86_64) 88 | u.voidPtr = cpu.eip; 89 | #elif CPU(ARM_TRADITIONAL) || CPU(ARM_THUMB2) || CPU(ARM64) 90 | u.voidPtr = cpu.pc; 91 | #else 92 | #error "Unsupported CPU" 93 | #endif 94 | dataLogF("pc:<%p %ld>", u.voidPtr, u.intptrValue); 95 | } 96 | 97 | void printRegister(CPUState& cpu, RegisterID regID) 98 | { 99 | const char* name = CPUState::gprName(regID); 100 | union { 101 | void* voidPtr; 102 | intptr_t intptrValue; 103 | } u; 104 | u.voidPtr = cpu.gpr(regID); 105 | dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue); 106 | } 107 | 108 | void printRegister(CPUState& cpu, FPRegisterID regID) 109 | { 110 | const char* name = CPUState::fprName(regID); 111 | union { 112 | double doubleValue; 113 | uint64_t uint64Value; 114 | } u; 115 | u.doubleValue = cpu.fpr(regID); 116 | dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue); 117 | } 118 | 119 | void printMemory(CPUState& cpu, const Memory& memory) 120 | { 121 | uint8_t* ptr = nullptr; 122 | switch (memory.addressType) { 123 | case Memory::AddressType::Address: { 124 | ptr = reinterpret_cast(cpu.gpr(memory.u.address.base)); 125 | ptr += memory.u.address.offset; 126 | break; 127 | } 128 | case Memory::AddressType::AbsoluteAddress: 
{ 129 | ptr = reinterpret_cast(const_cast(memory.u.absoluteAddress.m_ptr)); 130 | break; 131 | } 132 | } 133 | 134 | if (memory.dumpStyle == Memory::SingleWordDump) { 135 | if (memory.numBytes == sizeof(int8_t)) { 136 | auto p = reinterpret_cast(ptr); 137 | dataLogF("%p:<0x%02x %d>", p, *p, *p); 138 | return; 139 | } 140 | if (memory.numBytes == sizeof(int16_t)) { 141 | auto p = reinterpret_cast(ptr); 142 | dataLogF("%p:<0x%04x %d>", p, *p, *p); 143 | return; 144 | } 145 | if (memory.numBytes == sizeof(int32_t)) { 146 | auto p = reinterpret_cast(ptr); 147 | dataLogF("%p:<0x%08x %d>", p, *p, *p); 148 | return; 149 | } 150 | if (memory.numBytes == sizeof(int64_t)) { 151 | auto p = reinterpret_cast(ptr); 152 | dataLogF("%p:<0x%016llx %lld>", p, *p, *p); 153 | return; 154 | } 155 | // Else, unknown word size. Fall thru and dump in the generic way. 156 | } 157 | 158 | // Generic dump: dump rows of 16 bytes in 4 byte groupings. 159 | size_t numBytes = memory.numBytes; 160 | for (size_t i = 0; i < numBytes; i++) { 161 | if (!(i % 16)) 162 | dataLogF("%p: ", &ptr[i]); 163 | else if (!(i % 4)) 164 | dataLog(" "); 165 | 166 | dataLogF("%02x", ptr[i]); 167 | 168 | if (i % 16 == 15) 169 | dataLog("\n"); 170 | } 171 | if (numBytes % 16 < 15) 172 | dataLog("\n"); 173 | } 174 | 175 | void MacroAssemblerPrinter::printCallback(ProbeContext* context) 176 | { 177 | typedef PrintArg Arg; 178 | PrintArgsList& argsList = 179 | *reinterpret_cast(context->arg1); 180 | for (size_t i = 0; i < argsList.size(); i++) { 181 | auto& arg = argsList[i]; 182 | switch (arg.type) { 183 | case Arg::Type::AllRegisters: 184 | printCPU(context->cpu, 1); 185 | break; 186 | case Arg::Type::PCRegister: 187 | printPC(context->cpu); 188 | break; 189 | case Arg::Type::RegisterID: 190 | printRegister(context->cpu, arg.u.gpRegisterID); 191 | break; 192 | case Arg::Type::FPRegisterID: 193 | printRegister(context->cpu, arg.u.fpRegisterID); 194 | break; 195 | case Arg::Type::Memory: 196 | printMemory(context->cpu, arg.u.memory); 197 | break; 198 | case Arg::Type::ConstCharPtr: 199 | dataLog(arg.u.constCharPtr); 200 | break; 201 | case Arg::Type::ConstVoidPtr: 202 | dataLogF("%p", arg.u.constVoidPtr); 203 | break; 204 | case Arg::Type::IntptrValue: 205 | dataLog(arg.u.intptrValue); 206 | break; 207 | case Arg::Type::UintptrValue: 208 | dataLog(arg.u.uintptrValue); 209 | break; 210 | } 211 | } 212 | } 213 | 214 | } // namespace JSC 215 | 216 | #endif // ENABLE(MASM_PROBE) 217 | -------------------------------------------------------------------------------- /MacroAssemblerPrinter.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2015 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #ifndef MacroAssemblerPrinter_h 27 | #define MacroAssemblerPrinter_h 28 | 29 | #if ENABLE(MASM_PROBE) 30 | 31 | #include "MacroAssembler.h" 32 | 33 | namespace JSC { 34 | 35 | // What is MacroAssembler::print()? 36 | // =============================== 37 | // The MacroAsssembler::print() makes it easy to add print logging 38 | // from JIT compiled code, and can be used to print all types of values 39 | // at runtime e.g. CPU register values being operated on by the compiled 40 | // code. 41 | // 42 | // print() is built on top of MacroAsssembler::probe(), and hence 43 | // inserting logging in JIT compiled code will not perturb register values. 44 | // The only register value that is perturbed is the PC (program counter) 45 | // since there is now more compiled code to do the printing. 46 | // 47 | // How to use the MacroAssembler print()? 48 | // ===================================== 49 | // 1. #include "MacroAssemblerPrinter.h" in the JIT file where you want to use print(). 50 | // 51 | // 2. Add print() calls like these in your JIT code: 52 | // 53 | // jit.print("Hello world\n"); // Emits code to print the string. 54 | // 55 | // CodeBlock* cb = ...; 56 | // jit.print(cb, "\n"); // Emits code to print the pointer value. 57 | // 58 | // RegisterID regID = ...; 59 | // jit.print(regID, "\n"); // Emits code to print the register value (not the id). 60 | // 61 | // // Emits code to print all registers. Unlike other items, this prints 62 | // // multiple lines as follows: 63 | // // cpu { 64 | // // eax: 0x123456789 65 | // // ebx: 0x000000abc 66 | // // ... 67 | // // } 68 | // jit.print(AllRegisters()); 69 | // 70 | // jit.print(MemWord(regID), "\n"); // Emits code to print a byte pointed to by the register. 71 | // jit.print(MemWord(regID), "\n"); // Emits code to print a 32-bit word pointed to by the register. 72 | // 73 | // jit.print(MemWord(Address(regID, 23), "\n"); // Emits code to print a byte at the address. 74 | // jit.print(MemWord(AbsoluteAddress(&cb), "\n"); // Emits code to print an intptr_t sized word at the address. 75 | // 76 | // jit.print(Memory(reg, 100), "\n"); // Emits code to print a 100 bytes at the address pointed by the register. 77 | // jit.print(Memory(Address(reg, 4), 100), "\n"); // Emits code to print a 100 bytes at the address. 78 | // 79 | // // Print multiple things at once. This incurs the probe overhead only once 80 | // // to print all the items. 81 | // jit.print("cb:", cb, " regID:", regID, " cpu:\n", AllRegisters()); 82 | // 83 | // The type of values that can be printed is encapsulated in the PrintArg struct below. 84 | // 85 | // Note: print() does not automatically insert a '\n' at the end of the line. 86 | // If you want a '\n', you'll have to add it explicitly (as in the examples above). 87 | 88 | 89 | // This is a marker type only used with MacroAssemblerPrinter::print(). 90 | // See MacroAssemblerPrinter::print() below for details. 
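// Editor's note (illustrative sketch, not part of the original file):
// AllRegisters and PCRegister below are empty "tag" types; passing one to
// print() selects the matching PrintArg constructor purely from the argument's
// static type, with no runtime data attached. A minimal stand-alone analogue
// of that tag-dispatch pattern, with hypothetical names and kept inside
// "#if 0" so it is never compiled:
#if 0
struct ExampleAllRegisters { }; // tag meaning "dump the whole register file"

static void examplePrint(int value) { /* ... print a single value ... */ }
static void examplePrint(ExampleAllRegisters) { /* ... dump every register ... */ }

// examplePrint(42);                    // picks the int overload
// examplePrint(ExampleAllRegisters()); // picks the "dump everything" overload
#endif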
91 | struct AllRegisters { }; 92 | struct PCRegister { }; 93 | 94 | struct Memory { 95 | using Address = MacroAssembler::Address; 96 | using AbsoluteAddress = MacroAssembler::AbsoluteAddress; 97 | using RegisterID = MacroAssembler::RegisterID; 98 | 99 | enum class AddressType { 100 | Address, 101 | AbsoluteAddress, 102 | }; 103 | 104 | enum DumpStyle { 105 | SingleWordDump, 106 | GenericDump, 107 | }; 108 | 109 | Memory(RegisterID& reg, size_t bytes, DumpStyle style = GenericDump) 110 | : addressType(AddressType::Address) 111 | , dumpStyle(style) 112 | , numBytes(bytes) 113 | { 114 | u.address = Address(reg, 0); 115 | } 116 | 117 | Memory(const Address& address, size_t bytes, DumpStyle style = GenericDump) 118 | : addressType(AddressType::Address) 119 | , dumpStyle(style) 120 | , numBytes(bytes) 121 | { 122 | u.address = address; 123 | } 124 | 125 | Memory(const AbsoluteAddress& address, size_t bytes, DumpStyle style = GenericDump) 126 | : addressType(AddressType::AbsoluteAddress) 127 | , dumpStyle(style) 128 | , numBytes(bytes) 129 | { 130 | u.absoluteAddress = address; 131 | } 132 | 133 | AddressType addressType; 134 | DumpStyle dumpStyle; 135 | size_t numBytes; 136 | union UnionedAddress { 137 | UnionedAddress() { } 138 | 139 | Address address; 140 | AbsoluteAddress absoluteAddress; 141 | } u; 142 | }; 143 | 144 | template <typename IntType> 145 | struct MemWord : public Memory { 146 | MemWord(RegisterID& reg) 147 | : Memory(reg, sizeof(IntType), Memory::SingleWordDump) 148 | { } 149 | 150 | MemWord(const Address& address) 151 | : Memory(address, sizeof(IntType), Memory::SingleWordDump) 152 | { } 153 | 154 | MemWord(const AbsoluteAddress& address) 155 | : Memory(address, sizeof(IntType), Memory::SingleWordDump) 156 | { } 157 | }; 158 | 159 | 160 | class MacroAssemblerPrinter { 161 | using CPUState = MacroAssembler::CPUState; 162 | using ProbeContext = MacroAssembler::ProbeContext; 163 | using RegisterID = MacroAssembler::RegisterID; 164 | using FPRegisterID = MacroAssembler::FPRegisterID; 165 | 166 | public: 167 | template <typename... Arguments> 168 | static void print(MacroAssembler* masm, Arguments...
args) 169 | { 170 | auto argsList = std::make_unique<PrintArgsList>(); 171 | appendPrintArg(argsList.get(), args...); 172 | masm->probe(printCallback, argsList.release(), 0); 173 | } 174 | 175 | private: 176 | struct PrintArg { 177 | 178 | enum class Type { 179 | AllRegisters, 180 | PCRegister, 181 | RegisterID, 182 | FPRegisterID, 183 | Memory, 184 | ConstCharPtr, 185 | ConstVoidPtr, 186 | IntptrValue, 187 | UintptrValue, 188 | }; 189 | 190 | PrintArg(AllRegisters&) 191 | : type(Type::AllRegisters) 192 | { 193 | } 194 | 195 | PrintArg(PCRegister&) 196 | : type(Type::PCRegister) 197 | { 198 | } 199 | 200 | PrintArg(RegisterID regID) 201 | : type(Type::RegisterID) 202 | { 203 | u.gpRegisterID = regID; 204 | } 205 | 206 | PrintArg(FPRegisterID regID) 207 | : type(Type::FPRegisterID) 208 | { 209 | u.fpRegisterID = regID; 210 | } 211 | 212 | PrintArg(const Memory& memory) 213 | : type(Type::Memory) 214 | { 215 | u.memory = memory; 216 | } 217 | 218 | PrintArg(const char* ptr) 219 | : type(Type::ConstCharPtr) 220 | { 221 | u.constCharPtr = ptr; 222 | } 223 | 224 | PrintArg(const void* ptr) 225 | : type(Type::ConstVoidPtr) 226 | { 227 | u.constVoidPtr = ptr; 228 | } 229 | 230 | PrintArg(int value) 231 | : type(Type::IntptrValue) 232 | { 233 | u.intptrValue = value; 234 | } 235 | 236 | PrintArg(unsigned value) 237 | : type(Type::UintptrValue) 238 | { 239 | u.uintptrValue = value; 240 | } 241 | 242 | PrintArg(intptr_t value) 243 | : type(Type::IntptrValue) 244 | { 245 | u.intptrValue = value; 246 | } 247 | 248 | PrintArg(uintptr_t value) 249 | : type(Type::UintptrValue) 250 | { 251 | u.uintptrValue = value; 252 | } 253 | 254 | Type type; 255 | union Value { 256 | Value() { } 257 | 258 | RegisterID gpRegisterID; 259 | FPRegisterID fpRegisterID; 260 | Memory memory; 261 | const char* constCharPtr; 262 | const void* constVoidPtr; 263 | intptr_t intptrValue; 264 | uintptr_t uintptrValue; 265 | } u; 266 | }; 267 | 268 | typedef Vector<PrintArg> PrintArgsList; 269 | 270 | template <typename FirstArg, typename... Arguments> 271 | static void appendPrintArg(PrintArgsList* argsList, FirstArg& firstArg, Arguments... otherArgs) 272 | { 273 | argsList->append(PrintArg(firstArg)); 274 | appendPrintArg(argsList, otherArgs...); 275 | } 276 | 277 | static void appendPrintArg(PrintArgsList*) { } 278 | 279 | private: 280 | static void printCallback(ProbeContext*); 281 | }; 282 | 283 | template <typename... Arguments> 284 | void MacroAssembler::print(Arguments... args) 285 | { 286 | MacroAssemblerPrinter::print(this, args...); 287 | } 288 | 289 | 290 | // These printers will print a block of information. That block may be 291 | // indented with the specified indentation. 292 | void printCPU(MacroAssembler::CPUState&, int indentation = 0); 293 | void printCPURegisters(MacroAssembler::CPUState&, int indentation = 0); 294 | 295 | // These printers will print the specified information in line in the 296 | // print stream. Hence, no indentation will be applied. 297 | void printRegister(MacroAssembler::CPUState&, MacroAssembler::RegisterID); 298 | void printRegister(MacroAssembler::CPUState&, MacroAssembler::FPRegisterID); 299 | void printMemory(MacroAssembler::CPUState&, const Memory&); 300 | 301 | } // namespace JSC 302 | 303 | #endif // ENABLE(MASM_PROBE) 304 | 305 | #endif // MacroAssemblerPrinter_h 306 | -------------------------------------------------------------------------------- /MacroAssemblerX86.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2. Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #ifndef MacroAssemblerX86_h 27 | #define MacroAssemblerX86_h 28 | 29 | #if ENABLE(ASSEMBLER) && CPU(X86) 30 | 31 | #include "MacroAssemblerX86Common.h" 32 | 33 | namespace JSC { 34 | 35 | class MacroAssemblerX86 : public MacroAssemblerX86Common { 36 | public: 37 | static const unsigned numGPRs = 8; 38 | static const unsigned numFPRs = 8; 39 | 40 | static const Scale ScalePtr = TimesFour; 41 | 42 | using MacroAssemblerX86Common::add32; 43 | using MacroAssemblerX86Common::and32; 44 | using MacroAssemblerX86Common::branchAdd32; 45 | using MacroAssemblerX86Common::branchSub32; 46 | using MacroAssemblerX86Common::sub32; 47 | using MacroAssemblerX86Common::or32; 48 | using MacroAssemblerX86Common::load32; 49 | using MacroAssemblerX86Common::load8; 50 | using MacroAssemblerX86Common::store32; 51 | using MacroAssemblerX86Common::store8; 52 | using MacroAssemblerX86Common::branch32; 53 | using MacroAssemblerX86Common::call; 54 | using MacroAssemblerX86Common::jump; 55 | using MacroAssemblerX86Common::addDouble; 56 | using MacroAssemblerX86Common::loadDouble; 57 | using MacroAssemblerX86Common::storeDouble; 58 | using MacroAssemblerX86Common::convertInt32ToDouble; 59 | using MacroAssemblerX86Common::branch8; 60 | using MacroAssemblerX86Common::branchTest8; 61 | 62 | void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) 63 | { 64 | m_assembler.leal_mr(imm.m_value, src, dest); 65 | } 66 | 67 | void add32(TrustedImm32 imm, AbsoluteAddress address) 68 | { 69 | m_assembler.addl_im(imm.m_value, address.m_ptr); 70 | } 71 | 72 | void add32(AbsoluteAddress address, RegisterID dest) 73 | { 74 | m_assembler.addl_mr(address.m_ptr, dest); 75 | } 76 | 77 | void add64(TrustedImm32 imm, AbsoluteAddress address) 78 | { 79 | m_assembler.addl_im(imm.m_value, address.m_ptr); 80 | m_assembler.adcl_im(imm.m_value >> 31, reinterpret_cast<const char*>(address.m_ptr) + sizeof(int32_t)); 81 | } 82 | 83 | void and32(TrustedImm32 imm, AbsoluteAddress address) 84 | { 85 | m_assembler.andl_im(imm.m_value, address.m_ptr); 86 | } 87 | 88 | void or32(TrustedImm32 imm, AbsoluteAddress address) 89 | { 90 | m_assembler.orl_im(imm.m_value, address.m_ptr); 91 | } 92 | 93 | void or32(RegisterID reg,
AbsoluteAddress address) 94 | { 95 | m_assembler.orl_rm(reg, address.m_ptr); 96 | } 97 | 98 | void sub32(TrustedImm32 imm, AbsoluteAddress address) 99 | { 100 | m_assembler.subl_im(imm.m_value, address.m_ptr); 101 | } 102 | 103 | void load32(const void* address, RegisterID dest) 104 | { 105 | m_assembler.movl_mr(address, dest); 106 | } 107 | 108 | void load8(const void* address, RegisterID dest) 109 | { 110 | m_assembler.movzbl_mr(address, dest); 111 | } 112 | 113 | void abortWithReason(AbortReason reason) 114 | { 115 | move(TrustedImm32(reason), X86Registers::eax); 116 | breakpoint(); 117 | } 118 | 119 | void abortWithReason(AbortReason reason, intptr_t misc) 120 | { 121 | move(TrustedImm32(misc), X86Registers::edx); 122 | abortWithReason(reason); 123 | } 124 | 125 | ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) 126 | { 127 | ConvertibleLoadLabel result = ConvertibleLoadLabel(this); 128 | m_assembler.movl_mr(address.offset, address.base, dest); 129 | return result; 130 | } 131 | 132 | void addDouble(AbsoluteAddress address, FPRegisterID dest) 133 | { 134 | m_assembler.addsd_mr(address.m_ptr, dest); 135 | } 136 | 137 | void storeDouble(FPRegisterID src, TrustedImmPtr address) 138 | { 139 | ASSERT(isSSE2Present()); 140 | ASSERT(address.m_value); 141 | m_assembler.movsd_rm(src, address.m_value); 142 | } 143 | 144 | void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest) 145 | { 146 | m_assembler.cvtsi2sd_mr(src.m_ptr, dest); 147 | } 148 | 149 | void store32(TrustedImm32 imm, void* address) 150 | { 151 | m_assembler.movl_i32m(imm.m_value, address); 152 | } 153 | 154 | void store32(RegisterID src, void* address) 155 | { 156 | m_assembler.movl_rm(src, address); 157 | } 158 | 159 | void store8(RegisterID src, void* address) 160 | { 161 | m_assembler.movb_rm(src, address); 162 | } 163 | 164 | void store8(TrustedImm32 imm, void* address) 165 | { 166 | TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); 167 | m_assembler.movb_i8m(imm8.m_value, address); 168 | } 169 | 170 | void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2) 171 | { 172 | ASSERT(isSSE2Present()); 173 | m_assembler.pextrw_irr(3, src, dest1); 174 | m_assembler.pextrw_irr(2, src, dest2); 175 | lshift32(TrustedImm32(16), dest1); 176 | or32(dest1, dest2); 177 | moveFloatTo32(src, dest1); 178 | } 179 | 180 | void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch) 181 | { 182 | move32ToFloat(src1, dest); 183 | move32ToFloat(src2, scratch); 184 | lshiftPacked(TrustedImm32(32), scratch); 185 | orPacked(scratch, dest); 186 | } 187 | 188 | Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest) 189 | { 190 | m_assembler.addl_im(imm.m_value, dest.m_ptr); 191 | return Jump(m_assembler.jCC(x86Condition(cond))); 192 | } 193 | 194 | Jump branchSub32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest) 195 | { 196 | m_assembler.subl_im(imm.m_value, dest.m_ptr); 197 | return Jump(m_assembler.jCC(x86Condition(cond))); 198 | } 199 | 200 | Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) 201 | { 202 | m_assembler.cmpl_rm(right, left.m_ptr); 203 | return Jump(m_assembler.jCC(x86Condition(cond))); 204 | } 205 | 206 | Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) 207 | { 208 | m_assembler.cmpl_im(right.m_value, left.m_ptr); 209 | return Jump(m_assembler.jCC(x86Condition(cond))); 210 | } 211 | 212 | Call call() 213 | { 214 | return Call(m_assembler.call(),
Call::Linkable); 215 | } 216 | 217 | // Address is a memory location containing the address to jump to 218 | void jump(AbsoluteAddress address) 219 | { 220 | m_assembler.jmp_m(address.m_ptr); 221 | } 222 | 223 | Call tailRecursiveCall() 224 | { 225 | return Call::fromTailJump(jump()); 226 | } 227 | 228 | Call makeTailRecursiveCall(Jump oldJump) 229 | { 230 | return Call::fromTailJump(oldJump); 231 | } 232 | 233 | 234 | DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest) 235 | { 236 | padBeforePatch(); 237 | m_assembler.movl_i32r(initialValue.asIntptr(), dest); 238 | return DataLabelPtr(this); 239 | } 240 | 241 | Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) 242 | { 243 | TrustedImm32 right8(static_cast<int8_t>(right.m_value)); 244 | m_assembler.cmpb_im(right8.m_value, left.m_ptr); 245 | return Jump(m_assembler.jCC(x86Condition(cond))); 246 | } 247 | 248 | Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) 249 | { 250 | TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); 251 | if (mask8.m_value == -1) 252 | m_assembler.cmpb_im(0, address.m_ptr); 253 | else 254 | m_assembler.testb_im(mask8.m_value, address.m_ptr); 255 | return Jump(m_assembler.jCC(x86Condition(cond))); 256 | } 257 | 258 | Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) 259 | { 260 | padBeforePatch(); 261 | m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left); 262 | dataLabel = DataLabelPtr(this); 263 | return Jump(m_assembler.jCC(x86Condition(cond))); 264 | } 265 | 266 | Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) 267 | { 268 | padBeforePatch(); 269 | m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base); 270 | dataLabel = DataLabelPtr(this); 271 | return Jump(m_assembler.jCC(x86Condition(cond))); 272 | } 273 | 274 | Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) 275 | { 276 | padBeforePatch(); 277 | m_assembler.cmpl_im_force32(initialRightValue.m_value, left.offset, left.base); 278 | dataLabel = DataLabel32(this); 279 | return Jump(m_assembler.jCC(x86Condition(cond))); 280 | } 281 | 282 | DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) 283 | { 284 | padBeforePatch(); 285 | m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base); 286 | return DataLabelPtr(this); 287 | } 288 | 289 | static bool supportsFloatingPoint() { return isSSE2Present(); } 290 | static bool supportsFloatingPointTruncate() { return isSSE2Present(); } 291 | static bool supportsFloatingPointSqrt() { return isSSE2Present(); } 292 | static bool supportsFloatingPointAbs() { return isSSE2Present(); } 293 | 294 | static FunctionPtr readCallTarget(CodeLocationCall call) 295 | { 296 | intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1]; 297 | return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<intptr_t>(call.dataLocation()) + offset)); 298 | } 299 | 300 | static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; } 301 | static bool canJumpReplacePatchableBranch32WithPatch() { return true; } 302 | 303 | static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) 304 | { 305 | const int opcodeBytes = 1; 306 | const int modRMBytes = 1; 307 | const int
immediateBytes = 4; 308 | const int totalBytes = opcodeBytes + modRMBytes + immediateBytes; 309 | ASSERT(totalBytes >= maxJumpReplacementSize()); 310 | return label.labelAtOffset(-totalBytes); 311 | } 312 | 313 | static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label) 314 | { 315 | const int opcodeBytes = 1; 316 | const int modRMBytes = 1; 317 | const int offsetBytes = 0; 318 | const int immediateBytes = 4; 319 | const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes; 320 | ASSERT(totalBytes >= maxJumpReplacementSize()); 321 | return label.labelAtOffset(-totalBytes); 322 | } 323 | 324 | static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label) 325 | { 326 | const int opcodeBytes = 1; 327 | const int modRMBytes = 1; 328 | const int offsetBytes = 0; 329 | const int immediateBytes = 4; 330 | const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes; 331 | ASSERT(totalBytes >= maxJumpReplacementSize()); 332 | return label.labelAtOffset(-totalBytes); 333 | } 334 | 335 | static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue) 336 | { 337 | X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg); 338 | } 339 | 340 | static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address address, void* initialValue) 341 | { 342 | ASSERT(!address.offset); 343 | X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base); 344 | } 345 | 346 | static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address address, int32_t initialValue) 347 | { 348 | ASSERT(!address.offset); 349 | X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), initialValue, 0, address.base); 350 | } 351 | 352 | static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) 353 | { 354 | X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); 355 | } 356 | 357 | static void repatchCall(CodeLocationCall call, FunctionPtr destination) 358 | { 359 | X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); 360 | } 361 | 362 | private: 363 | friend class LinkBuffer; 364 | 365 | static void linkCall(void* code, Call call, FunctionPtr function) 366 | { 367 | if (call.isFlagSet(Call::Tail)) 368 | X86Assembler::linkJump(code, call.m_label, function.value()); 369 | else 370 | X86Assembler::linkCall(code, call.m_label, function.value()); 371 | } 372 | }; 373 | 374 | } // namespace JSC 375 | 376 | #endif // ENABLE(ASSEMBLER) 377 | 378 | #endif // MacroAssemblerX86_h 379 | -------------------------------------------------------------------------------- /MaxFrameExtentForSlowPathCall.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2013, 2016 Apple Inc. All rights reserved. 3 | * 4 | * Redistribution and use in source and binary forms, with or without 5 | * modification, are permitted provided that the following conditions 6 | * are met: 7 | * 1. Redistributions of source code must retain the above copyright 8 | * notice, this list of conditions and the following disclaimer. 9 | * 2.
Redistributions in binary form must reproduce the above copyright 10 | * notice, this list of conditions and the following disclaimer in the 11 | * documentation and/or other materials provided with the distribution. 12 | * 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | */ 25 | 26 | #ifndef MaxFrameExtentForSlowPathCall_h 27 | #define MaxFrameExtentForSlowPathCall_h 28 | 29 | #include "Register.h" 30 | #include "StackAlignment.h" 31 | #include <wtf/Assertions.h> 32 | 33 | namespace JSC { 34 | 35 | // The maxFrameExtentForSlowPathCall is the max amount of stack space (in bytes) 36 | // that can be used for outgoing args when calling a slow path C function 37 | // from JS code. 38 | 39 | #if !ENABLE(JIT) 40 | static const size_t maxFrameExtentForSlowPathCall = 0; 41 | 42 | #elif CPU(X86_64) && OS(WINDOWS) 43 | // 4 args in registers, but stack space needs to be allocated for all args. 44 | static const size_t maxFrameExtentForSlowPathCall = 64; 45 | 46 | #elif CPU(X86_64) 47 | // All args in registers. 48 | static const size_t maxFrameExtentForSlowPathCall = 0; 49 | 50 | #elif CPU(X86) 51 | // 7 args on stack (28 bytes). 52 | static const size_t maxFrameExtentForSlowPathCall = 40; 53 | 54 | #elif CPU(ARM64) 55 | // All args in registers. 56 | static const size_t maxFrameExtentForSlowPathCall = 0; 57 | 58 | #elif CPU(ARM) 59 | // First four args in registers, remaining 4 args on stack. 60 | static const size_t maxFrameExtentForSlowPathCall = 24; 61 | 62 | #elif CPU(SH4) 63 | // First four args in registers, remaining 4 args on stack. 64 | static const size_t maxFrameExtentForSlowPathCall = 24; 65 | 66 | #elif CPU(MIPS) 67 | // Though args are in registers, there needs to be space on the stack for all args.
68 | static const size_t maxFrameExtentForSlowPathCall = 40; 69 | 70 | #else 71 | #error "Unsupported CPU: need value for maxFrameExtentForSlowPathCall" 72 | 73 | #endif 74 | 75 | COMPILE_ASSERT(!(maxFrameExtentForSlowPathCall % sizeof(Register)), extent_must_be_in_multiples_of_registers); 76 | 77 | #if ENABLE(JIT) 78 | // Make sure that cfr - maxFrameExtentForSlowPathCall bytes will make the stack pointer aligned 79 | COMPILE_ASSERT((maxFrameExtentForSlowPathCall % 16) == 16 - sizeof(CallerFrameAndPC), extent_must_align_stack_from_callframe_pointer); 80 | #endif 81 | 82 | static const size_t maxFrameExtentForSlowPathCallInRegisters = maxFrameExtentForSlowPathCall / sizeof(Register); 83 | 84 | } // namespace JSC 85 | 86 | #endif // MaxFrameExtentForSlowPathCall_h 87 | 88 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | LibMacroassembler 2 | 3 | A JIT assembler from JavaScriptCore, used for generating machine code at runtime from C++. --------------------------------------------------------------------------------
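As a quick orientation alongside the README above, here is a minimal, hypothetical usage sketch of the emit / finalize flow these files implement. It assumes an x86 build with ENABLE(ASSEMBLER), a JavaScriptCore-style `VM&` handle, and the `LinkBuffer` constructor from this snapshot; the function name `compileReturn42` and the surrounding setup are illustrative only, not part of the library.

```cpp
// Hypothetical sketch (not part of the library): emit a tiny function that
// returns 42, then relocate it into executable memory via LinkBuffer.
#include "MacroAssembler.h"
#include "LinkBuffer.h"

typedef int (*Return42Fn)();

Return42Fn compileReturn42(JSC::VM& vm)
{
    JSC::MacroAssembler jit;

    // eax carries the 32-bit return value in the x86 calling convention.
    jit.move(JSC::MacroAssembler::TrustedImm32(42), JSC::X86Registers::eax);
    jit.ret();

    // Copy the buffered instructions into executable memory and resolve links.
    JSC::LinkBuffer linkBuffer(vm, jit, nullptr);
    JSC::MacroAssemblerCodeRef codeRef = linkBuffer.finalizeCodeWithoutDisassembly();
    return reinterpret_cast<Return42Fn>(codeRef.code().executableAddress());
}
```

In WebKit itself this flow is wrapped by higher-level JIT classes; the point here is only the assemble-then-finalize pattern that the MacroAssembler, LinkBuffer, and ExecutableAllocator files above provide.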