├── Architecture-mips-shared.cpp ├── Architecture-mips-shared.h ├── Architecture-mips64.cpp ├── Architecture-mips64.h ├── Assembler-mips-shared.cpp ├── Assembler-mips-shared.h ├── Assembler-mips64.cpp ├── Assembler-mips64.h ├── AtomicOperations-mips-shared.h ├── AtomicOperations-ppc64le.h ├── Bailouts-mips-shared.cpp ├── Bailouts-mips64.cpp ├── Bailouts-mips64.h ├── Bailouts-ppc64le.cpp ├── Bailouts-ppc64le.h ├── BaselineCompiler-mips-shared.cpp ├── BaselineCompiler-mips-shared.h ├── BaselineCompiler-mips64.cpp ├── BaselineCompiler-mips64.h ├── BaselineCompiler-ppc64le.cpp ├── BaselineCompiler-ppc64le.h ├── BaselineIC-mips-shared.cpp ├── BaselineIC-mips64.cpp ├── BaselineIC-ppc64le.cpp ├── CodeGenerator-mips-shared.cpp ├── CodeGenerator-mips-shared.h ├── CodeGenerator-mips64.cpp ├── CodeGenerator-mips64.h ├── CodeGenerator-ppc64le.cpp ├── CodeGenerator-ppc64le.h ├── LICENSE ├── LIR-mips-shared.h ├── LIR-mips64.h ├── LIR-ppc64le.h ├── Lowering-mips-shared.cpp ├── Lowering-mips-shared.h ├── Lowering-mips64.cpp ├── Lowering-mips64.h ├── Lowering-ppc64le.cpp ├── Lowering-ppc64le.h ├── MacroAssembler-mips-shared-inl.h ├── MacroAssembler-mips-shared.cpp ├── MacroAssembler-mips-shared.h ├── MacroAssembler-mips64-inl.h ├── MacroAssembler-mips64.cpp ├── MacroAssembler-mips64.h ├── MacroAssembler-ppc64le-inl.h ├── MacroAssembler-ppc64le.cpp ├── MacroAssembler-ppc64le.h ├── MoveEmitter-mips-shared.cpp ├── MoveEmitter-mips-shared.h ├── MoveEmitter-mips64.cpp ├── MoveEmitter-mips64.h ├── MoveEmitter-ppc64le.cpp ├── MoveEmitter-ppc64le.h ├── README.md ├── SharedIC-mips64.cpp ├── SharedIC-ppc64le.cpp ├── SharedICHelpers-mips-shared-inl.h ├── SharedICHelpers-mips-shared.h ├── SharedICHelpers-ppc64le-inl.h ├── SharedICHelpers-ppc64le.h ├── SharedICRegisters-mips64.h ├── SharedICRegisters-ppc64le.h ├── Simulator-mips64.cpp ├── Simulator-mips64.h ├── Trampoline-mips64.cpp └── Trampoline-ppc64le.cpp /Architecture-mips-shared.cpp: 
-------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #include "jit/mips-shared/Architecture-mips-shared.h" 8 | 9 | #include 10 | #include 11 | 12 | #include "jit/RegisterSets.h" 13 | 14 | #define HWCAP_MIPS (1 << 28) 15 | #define HWCAP_LOONGSON (1 << 27) 16 | #define HWCAP_R2 (1 << 26) 17 | #define HWCAP_FPU (1 << 0) 18 | 19 | namespace js { 20 | namespace jit { 21 | 22 | static uint32_t 23 | get_mips_flags() 24 | { 25 | uint32_t flags = HWCAP_MIPS; 26 | 27 | #if defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64) 28 | flags |= HWCAP_FPU; 29 | flags |= HWCAP_R2; 30 | #else 31 | # ifdef __linux__ 32 | FILE* fp = fopen("/proc/cpuinfo", "r"); 33 | if (!fp) 34 | return flags; 35 | 36 | char buf[1024]; 37 | memset(buf, 0, sizeof(buf)); 38 | (void)fread(buf, sizeof(char), sizeof(buf) - 1, fp); 39 | fclose(fp); 40 | if (strstr(buf, "FPU")) 41 | flags |= HWCAP_FPU; 42 | if (strstr(buf, "Loongson")) 43 | flags |= HWCAP_LOONGSON; 44 | if (strstr(buf, "mips32r2") || strstr(buf, "mips64r2")) 45 | flags |= HWCAP_R2; 46 | # endif 47 | #endif // JS_SIMULATOR_MIPS32 || JS_SIMULATOR_MIPS64 48 | return flags; 49 | } 50 | 51 | static bool check_fpu() 52 | { 53 | return mips_private::Flags & HWCAP_FPU; 54 | } 55 | 56 | static bool check_loongson() 57 | { 58 | return mips_private::Flags & HWCAP_LOONGSON; 59 | } 60 | 61 | static bool check_r2() 62 | { 63 | return mips_private::Flags & HWCAP_R2; 64 | } 65 | 66 | namespace mips_private { 67 | // Cache a local copy so we only have to read /proc/cpuinfo once. 
68 | uint32_t Flags = get_mips_flags(); 69 | bool hasFPU = check_fpu();; 70 | bool isLoongson = check_loongson(); 71 | bool hasR2 = check_r2(); 72 | } 73 | 74 | Registers::Code 75 | Registers::FromName(const char* name) 76 | { 77 | for (size_t i = 0; i < Total; i++) { 78 | if (strcmp(GetName(i), name) == 0) 79 | return Code(i); 80 | } 81 | 82 | return Invalid; 83 | } 84 | 85 | } // namespace ion 86 | } // namespace js 87 | 88 | -------------------------------------------------------------------------------- /Architecture-mips-shared.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_mips_shared_Architecture_mips_shared_h 8 | #define jit_mips_shared_Architecture_mips_shared_h 9 | 10 | #include "mozilla/MathAlgorithms.h" 11 | 12 | #include 13 | #include 14 | 15 | #include "jit/shared/Architecture-shared.h" 16 | 17 | #include "js/Utility.h" 18 | 19 | // gcc appears to use _mips_hard_float to denote 20 | // that the target is a hard-float target. 21 | #ifdef _mips_hard_float 22 | #define JS_CODEGEN_MIPS_HARDFP 23 | #endif 24 | 25 | #if (defined(_MIPS_SIM) && (_MIPS_SIM == _ABIO32)) || defined(JS_SIMULATOR_MIPS32) 26 | #define USES_O32_ABI 27 | #elif (defined(_MIPS_SIM) && (_MIPS_SIM == _ABI64)) || defined(JS_SIMULATOR_MIPS64) 28 | #define USES_N64_ABI 29 | #else 30 | #error "Unsupported ABI" 31 | #endif 32 | 33 | namespace js { 34 | namespace jit { 35 | 36 | // How far forward/back can a jump go? Provide a generous buffer for thunks. 
37 | static const uint32_t JumpImmediateRange = UINT32_MAX; 38 | 39 | class Registers 40 | { 41 | public: 42 | enum RegisterID { 43 | r0 = 0, 44 | r1, 45 | r2, 46 | r3, 47 | r4, 48 | r5, 49 | r6, 50 | r7, 51 | r8, 52 | r9, 53 | r10, 54 | r11, 55 | r12, 56 | r13, 57 | r14, 58 | r15, 59 | r16, 60 | r17, 61 | r18, 62 | r19, 63 | r20, 64 | r21, 65 | r22, 66 | r23, 67 | r24, 68 | r25, 69 | r26, 70 | r27, 71 | r28, 72 | r29, 73 | r30, 74 | r31, 75 | zero = r0, 76 | at = r1, 77 | v0 = r2, 78 | v1 = r3, 79 | a0 = r4, 80 | a1 = r5, 81 | a2 = r6, 82 | a3 = r7, 83 | #if defined(USES_O32_ABI) 84 | t0 = r8, 85 | t1 = r9, 86 | t2 = r10, 87 | t3 = r11, 88 | t4 = r12, 89 | t5 = r13, 90 | t6 = r14, 91 | t7 = r15, 92 | ta0 = t4, 93 | ta1 = t5, 94 | ta2 = t6, 95 | ta3 = t7, 96 | #elif defined(USES_N64_ABI) 97 | a4 = r8, 98 | a5 = r9, 99 | a6 = r10, 100 | a7 = r11, 101 | t0 = r12, 102 | t1 = r13, 103 | t2 = r14, 104 | t3 = r15, 105 | ta0 = a4, 106 | ta1 = a5, 107 | ta2 = a6, 108 | ta3 = a7, 109 | #endif 110 | s0 = r16, 111 | s1 = r17, 112 | s2 = r18, 113 | s3 = r19, 114 | s4 = r20, 115 | s5 = r21, 116 | s6 = r22, 117 | s7 = r23, 118 | t8 = r24, 119 | t9 = r25, 120 | k0 = r26, 121 | k1 = r27, 122 | gp = r28, 123 | sp = r29, 124 | fp = r30, 125 | ra = r31, 126 | invalid_reg 127 | }; 128 | typedef uint8_t Code; 129 | typedef RegisterID Encoding; 130 | 131 | // Content spilled during bailouts. 
132 | union RegisterContent { 133 | uintptr_t r; 134 | }; 135 | 136 | static const char * const RegNames[]; 137 | static const char* GetName(Code code) { 138 | MOZ_ASSERT(code < Total); 139 | return RegNames[code]; 140 | } 141 | static const char* GetName(Encoding i) { 142 | return GetName(Code(i)); 143 | } 144 | 145 | static Code FromName(const char* name); 146 | 147 | static const Encoding StackPointer = sp; 148 | static const Encoding Invalid = invalid_reg; 149 | 150 | static const uint32_t Total = 32; 151 | static const uint32_t Allocatable; 152 | 153 | typedef uint32_t SetType; 154 | static const SetType AllMask = 0xffffffff; 155 | static const SetType SharedArgRegMask = (1 << a0) | (1 << a1) | (1 << a2) | (1 << a3); 156 | static const SetType ArgRegMask; 157 | 158 | static const SetType VolatileMask = 159 | (1 << Registers::v0) | 160 | (1 << Registers::v1) | 161 | (1 << Registers::a0) | 162 | (1 << Registers::a1) | 163 | (1 << Registers::a2) | 164 | (1 << Registers::a3) | 165 | (1 << Registers::t0) | 166 | (1 << Registers::t1) | 167 | (1 << Registers::t2) | 168 | (1 << Registers::t3) | 169 | (1 << Registers::ta0) | 170 | (1 << Registers::ta1) | 171 | (1 << Registers::ta2) | 172 | (1 << Registers::ta3); 173 | 174 | // We use this constant to save registers when entering functions. This 175 | // is why $ra is added here even though it is not "Non Volatile". 
176 | static const SetType NonVolatileMask = 177 | (1 << Registers::s0) | 178 | (1 << Registers::s1) | 179 | (1 << Registers::s2) | 180 | (1 << Registers::s3) | 181 | (1 << Registers::s4) | 182 | (1 << Registers::s5) | 183 | (1 << Registers::s6) | 184 | (1 << Registers::s7) | 185 | (1 << Registers::fp) | 186 | (1 << Registers::ra); 187 | 188 | static const SetType WrapperMask = 189 | VolatileMask | // = arguments 190 | (1 << Registers::t0) | // = outReg 191 | (1 << Registers::t1); // = argBase 192 | 193 | static const SetType NonAllocatableMask = 194 | (1 << Registers::zero) | 195 | (1 << Registers::at) | // at = scratch 196 | (1 << Registers::t8) | // t8 = scratch 197 | (1 << Registers::t9) | // t9 = scratch 198 | (1 << Registers::k0) | 199 | (1 << Registers::k1) | 200 | (1 << Registers::gp) | 201 | (1 << Registers::sp) | 202 | (1 << Registers::ra); 203 | 204 | // Registers returned from a JS -> JS call. 205 | static const SetType JSCallMask; 206 | 207 | // Registers returned from a JS -> C call. 208 | static const SetType SharedCallMask = (1 << Registers::v0); 209 | static const SetType CallMask; 210 | 211 | static const SetType AllocatableMask = AllMask & ~NonAllocatableMask; 212 | 213 | static uint32_t SetSize(SetType x) { 214 | static_assert(sizeof(SetType) == 4, "SetType must be 32 bits"); 215 | return mozilla::CountPopulation32(x); 216 | } 217 | static uint32_t FirstBit(SetType x) { 218 | return mozilla::CountTrailingZeroes32(x); 219 | } 220 | static uint32_t LastBit(SetType x) { 221 | return 31 - mozilla::CountLeadingZeroes32(x); 222 | } 223 | }; 224 | 225 | // Smallest integer type that can hold a register bitmask. 
226 | typedef uint32_t PackedRegisterMask; 227 | 228 | class FloatRegistersMIPSShared 229 | { 230 | public: 231 | enum FPRegisterID { 232 | f0 = 0, 233 | f1, 234 | f2, 235 | f3, 236 | f4, 237 | f5, 238 | f6, 239 | f7, 240 | f8, 241 | f9, 242 | f10, 243 | f11, 244 | f12, 245 | f13, 246 | f14, 247 | f15, 248 | f16, 249 | f17, 250 | f18, 251 | f19, 252 | f20, 253 | f21, 254 | f22, 255 | f23, 256 | f24, 257 | f25, 258 | f26, 259 | f27, 260 | f28, 261 | f29, 262 | f30, 263 | f31, 264 | invalid_freg 265 | }; 266 | typedef uint32_t Code; 267 | typedef FPRegisterID Encoding; 268 | 269 | // Content spilled during bailouts. 270 | union RegisterContent { 271 | double d; 272 | }; 273 | 274 | static const char* GetName(Encoding code) { 275 | static const char * const Names[] = { "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", 276 | "f8", "f9", "f10", "f11", "f12", "f13", 277 | "f14", "f15", "f16", "f17", "f18", "f19", 278 | "f20", "f21", "f22", "f23", "f24", "f25", 279 | "f26", "f27", "f28", "f29", "f30", "f31"}; 280 | return Names[code]; 281 | } 282 | 283 | static const Encoding Invalid = invalid_freg; 284 | 285 | #if defined(JS_CODEGEN_MIPS32) 286 | typedef uint32_t SetType; 287 | #elif defined(JS_CODEGEN_MIPS64) 288 | typedef uint64_t SetType; 289 | #endif 290 | }; 291 | 292 | template 293 | class TypedRegisterSet; 294 | 295 | class FloatRegisterMIPSShared 296 | { 297 | public: 298 | bool isSimd128() const { return false; } 299 | 300 | typedef FloatRegistersMIPSShared::SetType SetType; 301 | 302 | #if defined(JS_CODEGEN_MIPS32) 303 | static uint32_t SetSize(SetType x) { 304 | static_assert(sizeof(SetType) == 4, "SetType must be 32 bits"); 305 | return mozilla::CountPopulation32(x); 306 | } 307 | static uint32_t FirstBit(SetType x) { 308 | static_assert(sizeof(SetType) == 4, "SetType must be 32 bits"); 309 | return mozilla::CountTrailingZeroes32(x); 310 | } 311 | static uint32_t LastBit(SetType x) { 312 | static_assert(sizeof(SetType) == 4, "SetType must be 32 bits"); 313 | 
return 31 - mozilla::CountLeadingZeroes32(x); 314 | } 315 | #elif defined(JS_CODEGEN_MIPS64) 316 | static uint32_t SetSize(SetType x) { 317 | static_assert(sizeof(SetType) == 8, "SetType must be 64 bits"); 318 | return mozilla::CountPopulation64(x); 319 | } 320 | static uint32_t FirstBit(SetType x) { 321 | static_assert(sizeof(SetType) == 8, "SetType must be 64 bits"); 322 | return mozilla::CountTrailingZeroes64(x); 323 | } 324 | static uint32_t LastBit(SetType x) { 325 | static_assert(sizeof(SetType) == 8, "SetType must be 64 bits"); 326 | return 63 - mozilla::CountLeadingZeroes64(x); 327 | } 328 | #endif 329 | }; 330 | 331 | namespace mips_private { 332 | extern uint32_t Flags; 333 | extern bool hasFPU; 334 | extern bool isLoongson; 335 | extern bool hasR2; 336 | } 337 | 338 | inline uint32_t GetMIPSFlags() { return mips_private::Flags; } 339 | inline bool hasFPU() { return mips_private::hasFPU; } 340 | inline bool isLoongson() { return mips_private::isLoongson; } 341 | inline bool hasR2() { return mips_private::hasR2; } 342 | 343 | // MIPS doesn't have double registers that can NOT be treated as float32. 344 | inline bool 345 | hasUnaliasedDouble() { 346 | return false; 347 | } 348 | 349 | // MIPS64 doesn't support it and on MIPS32 we don't allocate odd single fp 350 | // registers thus not exposing multi aliasing to the jit. 351 | // See comments in Arhitecture-mips32.h. 352 | inline bool 353 | hasMultiAlias() { 354 | return false; 355 | } 356 | 357 | } // namespace jit 358 | } // namespace js 359 | 360 | #endif /* jit_mips_shared_Architecture_mips_shared_h */ 361 | -------------------------------------------------------------------------------- /Architecture-mips64.cpp: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 
2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #include "jit/mips64/Architecture-mips64.h" 8 | 9 | #include "jit/RegisterSets.h" 10 | 11 | namespace js { 12 | namespace jit { 13 | 14 | const char * const Registers::RegNames[] = { "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", 15 | "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3", 16 | "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", 17 | "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra" }; 18 | 19 | const uint32_t Allocatable = 22; 20 | 21 | const Registers::SetType Registers::ArgRegMask = 22 | Registers::SharedArgRegMask | 23 | (1 << a4) | (1 << a5) | (1 << a6) | (1 << a7); 24 | 25 | const Registers::SetType Registers::JSCallMask = 26 | (1 << Registers::v1); 27 | 28 | const Registers::SetType Registers::CallMask = 29 | (1 << Registers::v0); 30 | 31 | FloatRegisters::Encoding 32 | FloatRegisters::FromName(const char* name) 33 | { 34 | for (size_t i = 0; i < Total; i++) { 35 | if (strcmp(GetName(Encoding(i)), name) == 0) 36 | return Encoding(i); 37 | } 38 | 39 | return Invalid; 40 | } 41 | 42 | FloatRegister 43 | FloatRegister::singleOverlay() const 44 | { 45 | MOZ_ASSERT(!isInvalid()); 46 | if (kind_ == Codes::Double) 47 | return FloatRegister(reg_, Codes::Single); 48 | return *this; 49 | } 50 | 51 | FloatRegister 52 | FloatRegister::doubleOverlay() const 53 | { 54 | MOZ_ASSERT(!isInvalid()); 55 | if (kind_ != Codes::Double) 56 | return FloatRegister(reg_, Codes::Double); 57 | return *this; 58 | } 59 | 60 | FloatRegisterSet 61 | FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) 62 | { 63 | LiveFloatRegisterSet mod; 64 | for (FloatRegisterIterator iter(s); iter.more(); ++iter) { 65 | if ((*iter).isSingle()) { 66 | // Even for single size registers save complete double register. 
67 | mod.addUnchecked((*iter).doubleOverlay()); 68 | } else { 69 | mod.addUnchecked(*iter); 70 | } 71 | } 72 | return mod.set(); 73 | } 74 | 75 | uint32_t 76 | FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) 77 | { 78 | FloatRegisterSet ss = s.reduceSetForPush(); 79 | uint64_t bits = ss.bits(); 80 | // We are only pushing double registers. 81 | MOZ_ASSERT((bits & 0xffffffff) == 0); 82 | uint32_t ret = mozilla::CountPopulation32(bits >> 32) * sizeof(double); 83 | return ret; 84 | } 85 | uint32_t 86 | FloatRegister::getRegisterDumpOffsetInBytes() 87 | { 88 | return id() * sizeof(double); 89 | } 90 | 91 | } // namespace ion 92 | } // namespace js 93 | 94 | -------------------------------------------------------------------------------- /Architecture-mips64.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_mips64_Architecture_mips64_h 8 | #define jit_mips64_Architecture_mips64_h 9 | 10 | #include "mozilla/MathAlgorithms.h" 11 | 12 | #include 13 | #include 14 | 15 | #include "jit/mips-shared/Architecture-mips-shared.h" 16 | 17 | #include "js/Utility.h" 18 | 19 | namespace js { 20 | namespace jit { 21 | 22 | // Shadow stack space is not required on MIPS64. 23 | static const uint32_t ShadowStackSpace = 0; 24 | 25 | // MIPS64 have 64 bit floating-point coprocessor. There are 32 double 26 | // precision register which can also be used as single precision registers. 
27 | class FloatRegisters : public FloatRegistersMIPSShared 28 | { 29 | public: 30 | enum ContentType { 31 | Single, 32 | Double, 33 | NumTypes 34 | }; 35 | 36 | static const char* GetName(uint32_t i) { 37 | MOZ_ASSERT(i < TotalPhys); 38 | return FloatRegistersMIPSShared::GetName(Encoding(i)); 39 | } 40 | 41 | static Encoding FromName(const char* name); 42 | 43 | static const uint32_t Total = 32 * NumTypes; 44 | static const uint32_t Allocatable = 62; 45 | // When saving all registers we only need to do is save double registers. 46 | static const uint32_t TotalPhys = 32; 47 | 48 | static_assert(sizeof(SetType) * 8 >= Total, 49 | "SetType should be large enough to enumerate all registers."); 50 | 51 | // Magic values which are used to duplicate a mask of physical register for 52 | // a specific type of register. A multiplication is used to copy and shift 53 | // the bits of the physical register mask. 54 | static const SetType SpreadSingle = SetType(1) << (uint32_t(Single) * TotalPhys); 55 | static const SetType SpreadDouble = SetType(1) << (uint32_t(Double) * TotalPhys); 56 | static const SetType SpreadScalar = SpreadSingle | SpreadDouble; 57 | static const SetType SpreadVector = 0; 58 | static const SetType Spread = SpreadScalar | SpreadVector; 59 | 60 | static const SetType AllPhysMask = ((SetType(1) << TotalPhys) - 1); 61 | static const SetType AllMask = AllPhysMask * Spread; 62 | static const SetType AllSingleMask = AllPhysMask * SpreadSingle; 63 | static const SetType AllDoubleMask = AllPhysMask * SpreadDouble; 64 | 65 | static const SetType NonVolatileMask = 66 | ( (1U << FloatRegisters::f24) | 67 | (1U << FloatRegisters::f25) | 68 | (1U << FloatRegisters::f26) | 69 | (1U << FloatRegisters::f27) | 70 | (1U << FloatRegisters::f28) | 71 | (1U << FloatRegisters::f29) | 72 | (1U << FloatRegisters::f30) | 73 | (1U << FloatRegisters::f31) 74 | ) * SpreadScalar 75 | | AllPhysMask * SpreadVector; 76 | 77 | static const SetType VolatileMask = AllMask & 
~NonVolatileMask; 78 | 79 | static const SetType WrapperMask = VolatileMask; 80 | 81 | static const SetType NonAllocatableMask = 82 | (1U << FloatRegisters::f23) * Spread; 83 | 84 | static const SetType AllocatableMask = AllMask & ~NonAllocatableMask; 85 | }; 86 | 87 | template 88 | class TypedRegisterSet; 89 | 90 | class FloatRegister : public FloatRegisterMIPSShared 91 | { 92 | public: 93 | typedef FloatRegisters Codes; 94 | typedef size_t Code; 95 | typedef Codes::Encoding Encoding; 96 | typedef Codes::ContentType ContentType; 97 | 98 | Encoding reg_: 6; 99 | private: 100 | ContentType kind_ : 3; 101 | 102 | public: 103 | constexpr FloatRegister(uint32_t r, ContentType kind = Codes::Double) 104 | : reg_(Encoding(r)), kind_(kind) 105 | { } 106 | constexpr FloatRegister() 107 | : reg_(Encoding(FloatRegisters::invalid_freg)), kind_(Codes::Double) 108 | { } 109 | 110 | static uint32_t SetSize(SetType x) { 111 | // Count the number of non-aliased registers. 112 | x |= x >> Codes::TotalPhys; 113 | x &= Codes::AllPhysMask; 114 | static_assert(Codes::AllPhysMask <= 0xffffffff, "We can safely use CountPopulation32"); 115 | return mozilla::CountPopulation32(x); 116 | } 117 | 118 | bool operator==(const FloatRegister& other) const { 119 | MOZ_ASSERT(!isInvalid()); 120 | MOZ_ASSERT(!other.isInvalid()); 121 | return kind_ == other.kind_ && reg_ == other.reg_; 122 | } 123 | bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; } 124 | size_t size() const { return (kind_ == Codes::Double) ? sizeof(double) : sizeof (float); } 125 | // Always push doubles to maintain 8-byte stack alignment. 
126 | size_t pushSize() const { return sizeof(double); } 127 | bool isInvalid() const { 128 | return reg_ == FloatRegisters::invalid_freg; 129 | } 130 | 131 | bool isSingle() const { return kind_ == Codes::Single; } 132 | bool isDouble() const { return kind_ == Codes::Double; } 133 | 134 | FloatRegister singleOverlay() const; 135 | FloatRegister doubleOverlay() const; 136 | 137 | FloatRegister asSingle() const { return singleOverlay(); } 138 | FloatRegister asDouble() const { return doubleOverlay(); } 139 | FloatRegister asSimd128() const { MOZ_CRASH("NYI"); } 140 | 141 | Code code() const { 142 | MOZ_ASSERT(!isInvalid()); 143 | return Code(reg_ | (kind_ << 5)); 144 | } 145 | Encoding encoding() const { 146 | MOZ_ASSERT(!isInvalid()); 147 | MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys); 148 | return reg_; 149 | } 150 | uint32_t id() const { 151 | return reg_; 152 | } 153 | static FloatRegister FromCode(uint32_t i) { 154 | uint32_t code = i & 0x1f; 155 | uint32_t kind = i >> 5; 156 | return FloatRegister(Code(code), ContentType(kind)); 157 | } 158 | 159 | bool volatile_() const { 160 | return !!((1 << reg_) & FloatRegisters::VolatileMask); 161 | } 162 | const char* name() const { 163 | return FloatRegisters::GetName(reg_); 164 | } 165 | bool operator != (const FloatRegister& other) const { 166 | return kind_ != other.kind_ || reg_ != other.reg_; 167 | } 168 | bool aliases(const FloatRegister& other) { 169 | return reg_ == other.reg_; 170 | } 171 | uint32_t numAliased() const { 172 | return 2; 173 | } 174 | FloatRegister aliased(uint32_t aliasIdx) { 175 | if (aliasIdx == 0) 176 | return *this; 177 | MOZ_ASSERT(aliasIdx == 1); 178 | if (isDouble()) 179 | return singleOverlay(); 180 | return doubleOverlay(); 181 | } 182 | uint32_t numAlignedAliased() const { 183 | return 2; 184 | } 185 | FloatRegister alignedAliased(uint32_t aliasIdx) { 186 | MOZ_ASSERT(isDouble()); 187 | if (aliasIdx == 0) 188 | return *this; 189 | MOZ_ASSERT(aliasIdx == 1); 190 | return 
singleOverlay(); 191 | } 192 | 193 | SetType alignedOrDominatedAliasedSet() const { 194 | return Codes::Spread << reg_; 195 | } 196 | 197 | static constexpr RegTypeName DefaultType = RegTypeName::Float64; 198 | 199 | template 200 | static SetType LiveAsIndexableSet(SetType s) { 201 | return SetType(0); 202 | } 203 | 204 | template 205 | static SetType AllocatableAsIndexableSet(SetType s) { 206 | static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable"); 207 | return LiveAsIndexableSet(s); 208 | } 209 | 210 | static Code FromName(const char* name) { 211 | return FloatRegisters::FromName(name); 212 | } 213 | static TypedRegisterSet ReduceSetForPush(const TypedRegisterSet& s); 214 | static uint32_t GetPushSizeInBytes(const TypedRegisterSet& s); 215 | uint32_t getRegisterDumpOffsetInBytes(); 216 | }; 217 | 218 | template <> inline FloatRegister::SetType 219 | FloatRegister::LiveAsIndexableSet(SetType set) 220 | { 221 | return set & FloatRegisters::AllSingleMask; 222 | } 223 | 224 | template <> inline FloatRegister::SetType 225 | FloatRegister::LiveAsIndexableSet(SetType set) 226 | { 227 | return set & FloatRegisters::AllDoubleMask; 228 | } 229 | 230 | template <> inline FloatRegister::SetType 231 | FloatRegister::LiveAsIndexableSet(SetType set) 232 | { 233 | return set; 234 | } 235 | 236 | } // namespace jit 237 | } // namespace js 238 | 239 | #endif /* jit_mips64_Architecture_mips64_h */ 240 | -------------------------------------------------------------------------------- /Assembler-mips64.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 6 | 7 | #ifndef jit_mips64_Assembler_mips64_h 8 | #define jit_mips64_Assembler_mips64_h 9 | 10 | #include "jit/mips-shared/Assembler-mips-shared.h" 11 | 12 | #include "jit/mips64/Architecture-mips64.h" 13 | 14 | namespace js { 15 | namespace jit { 16 | 17 | static constexpr Register CallTempReg4 = a4; 18 | static constexpr Register CallTempReg5 = a5; 19 | 20 | static constexpr Register CallTempNonArgRegs[] = { t0, t1, t2, t3 }; 21 | static const uint32_t NumCallTempNonArgRegs = mozilla::ArrayLength(CallTempNonArgRegs); 22 | 23 | class ABIArgGenerator 24 | { 25 | unsigned usedArgSlots_; 26 | bool firstArgFloat; 27 | ABIArg current_; 28 | 29 | public: 30 | ABIArgGenerator(); 31 | ABIArg next(MIRType argType); 32 | ABIArg& current() { return current_; } 33 | 34 | uint32_t stackBytesConsumedSoFar() const { 35 | if (usedArgSlots_ <= 8) 36 | return 0; 37 | 38 | return (usedArgSlots_ - 8) * sizeof(int64_t); 39 | } 40 | }; 41 | 42 | // These registers may be volatile or nonvolatile. 43 | static constexpr Register ABINonArgReg0 = t0; 44 | static constexpr Register ABINonArgReg1 = t1; 45 | static constexpr Register ABINonArgReg2 = t2; 46 | 47 | // This register may be volatile or nonvolatile. Avoid f23 which is the 48 | // ScratchDoubleReg. 49 | static constexpr FloatRegister ABINonArgDoubleReg { FloatRegisters::f21, FloatRegisters::Double }; 50 | 51 | // These registers may be volatile or nonvolatile. 52 | // Note: these three registers are all guaranteed to be different 53 | static constexpr Register ABINonArgReturnReg0 = t0; 54 | static constexpr Register ABINonArgReturnReg1 = t1; 55 | static constexpr Register ABINonVolatileReg = s0; 56 | 57 | // This register is guaranteed to be clobberable during the prologue and 58 | // epilogue of an ABI call which must preserve both ABI argument, return 59 | // and non-volatile registers. 60 | static constexpr Register ABINonArgReturnVolatileReg = t0; 61 | 62 | // TLS pointer argument register for WebAssembly functions. 
This must not alias 63 | // any other register used for passing function arguments or return values. 64 | // Preserved by WebAssembly functions. 65 | static constexpr Register WasmTlsReg = s5; 66 | 67 | // Registers used for wasm table calls. These registers must be disjoint 68 | // from the ABI argument registers, WasmTlsReg and each other. 69 | static constexpr Register WasmTableCallScratchReg = ABINonArgReg0; 70 | static constexpr Register WasmTableCallSigReg = ABINonArgReg1; 71 | static constexpr Register WasmTableCallIndexReg = ABINonArgReg2; 72 | 73 | static constexpr Register JSReturnReg = v1; 74 | static constexpr Register JSReturnReg_Type = JSReturnReg; 75 | static constexpr Register JSReturnReg_Data = JSReturnReg; 76 | static constexpr Register64 ReturnReg64(ReturnReg); 77 | static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::f0, FloatRegisters::Single }; 78 | static constexpr FloatRegister ReturnDoubleReg = { FloatRegisters::f0, FloatRegisters::Double }; 79 | static constexpr FloatRegister ScratchFloat32Reg = { FloatRegisters::f23, FloatRegisters::Single }; 80 | static constexpr FloatRegister ScratchDoubleReg = { FloatRegisters::f23, FloatRegisters::Double }; 81 | 82 | struct ScratchFloat32Scope : public AutoFloatRegisterScope 83 | { 84 | explicit ScratchFloat32Scope(MacroAssembler& masm) 85 | : AutoFloatRegisterScope(masm, ScratchFloat32Reg) 86 | { } 87 | }; 88 | 89 | struct ScratchDoubleScope : public AutoFloatRegisterScope 90 | { 91 | explicit ScratchDoubleScope(MacroAssembler& masm) 92 | : AutoFloatRegisterScope(masm, ScratchDoubleReg) 93 | { } 94 | }; 95 | 96 | static constexpr FloatRegister f0 = { FloatRegisters::f0, FloatRegisters::Double }; 97 | static constexpr FloatRegister f1 = { FloatRegisters::f1, FloatRegisters::Double }; 98 | static constexpr FloatRegister f2 = { FloatRegisters::f2, FloatRegisters::Double }; 99 | static constexpr FloatRegister f3 = { FloatRegisters::f3, FloatRegisters::Double }; 100 | static constexpr 
FloatRegister f4 = { FloatRegisters::f4, FloatRegisters::Double }; 101 | static constexpr FloatRegister f5 = { FloatRegisters::f5, FloatRegisters::Double }; 102 | static constexpr FloatRegister f6 = { FloatRegisters::f6, FloatRegisters::Double }; 103 | static constexpr FloatRegister f7 = { FloatRegisters::f7, FloatRegisters::Double }; 104 | static constexpr FloatRegister f8 = { FloatRegisters::f8, FloatRegisters::Double }; 105 | static constexpr FloatRegister f9 = { FloatRegisters::f9, FloatRegisters::Double }; 106 | static constexpr FloatRegister f10 = { FloatRegisters::f10, FloatRegisters::Double }; 107 | static constexpr FloatRegister f11 = { FloatRegisters::f11, FloatRegisters::Double }; 108 | static constexpr FloatRegister f12 = { FloatRegisters::f12, FloatRegisters::Double }; 109 | static constexpr FloatRegister f13 = { FloatRegisters::f13, FloatRegisters::Double }; 110 | static constexpr FloatRegister f14 = { FloatRegisters::f14, FloatRegisters::Double }; 111 | static constexpr FloatRegister f15 = { FloatRegisters::f15, FloatRegisters::Double }; 112 | static constexpr FloatRegister f16 = { FloatRegisters::f16, FloatRegisters::Double }; 113 | static constexpr FloatRegister f17 = { FloatRegisters::f17, FloatRegisters::Double }; 114 | static constexpr FloatRegister f18 = { FloatRegisters::f18, FloatRegisters::Double }; 115 | static constexpr FloatRegister f19 = { FloatRegisters::f19, FloatRegisters::Double }; 116 | static constexpr FloatRegister f20 = { FloatRegisters::f20, FloatRegisters::Double }; 117 | static constexpr FloatRegister f21 = { FloatRegisters::f21, FloatRegisters::Double }; 118 | static constexpr FloatRegister f22 = { FloatRegisters::f22, FloatRegisters::Double }; 119 | static constexpr FloatRegister f23 = { FloatRegisters::f23, FloatRegisters::Double }; 120 | static constexpr FloatRegister f24 = { FloatRegisters::f24, FloatRegisters::Double }; 121 | static constexpr FloatRegister f25 = { FloatRegisters::f25, FloatRegisters::Double }; 122 | 
static constexpr FloatRegister f26 = { FloatRegisters::f26, FloatRegisters::Double }; 123 | static constexpr FloatRegister f27 = { FloatRegisters::f27, FloatRegisters::Double }; 124 | static constexpr FloatRegister f28 = { FloatRegisters::f28, FloatRegisters::Double }; 125 | static constexpr FloatRegister f29 = { FloatRegisters::f29, FloatRegisters::Double }; 126 | static constexpr FloatRegister f30 = { FloatRegisters::f30, FloatRegisters::Double }; 127 | static constexpr FloatRegister f31 = { FloatRegisters::f31, FloatRegisters::Double }; 128 | 129 | // MIPS64 CPUs can only load multibyte data that is "naturally" 130 | // eight-byte-aligned, sp register should be sixteen-byte-aligned. 131 | static constexpr uint32_t ABIStackAlignment = 16; 132 | static constexpr uint32_t JitStackAlignment = 16; 133 | 134 | static constexpr uint32_t JitStackValueAlignment = JitStackAlignment / sizeof(Value); 135 | static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >= 1, 136 | "Stack alignment should be a non-zero multiple of sizeof(Value)"); 137 | 138 | // TODO this is just a filler to prevent a build failure. The MIPS SIMD 139 | // alignment requirements still need to be explored. 140 | // TODO Copy the static_asserts from x64/x86 assembler files. 141 | static constexpr uint32_t SimdMemoryAlignment = 16; 142 | 143 | static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment; 144 | static const uint32_t WasmTrapInstructionLength = 4; 145 | 146 | // Does this architecture support SIMD conversions between Uint32x4 and Float32x4? 147 | static constexpr bool SupportsUint32x4FloatConversions = false; 148 | 149 | // Does this architecture support comparisons of unsigned integer vectors? 
150 | static constexpr bool SupportsUint8x16Compares = false; 151 | static constexpr bool SupportsUint16x8Compares = false; 152 | static constexpr bool SupportsUint32x4Compares = false; 153 | 154 | static constexpr Scale ScalePointer = TimesEight; 155 | 156 | class Assembler : public AssemblerMIPSShared 157 | { 158 | public: 159 | Assembler() 160 | : AssemblerMIPSShared() 161 | { } 162 | 163 | static uintptr_t GetPointer(uint8_t*); 164 | 165 | using AssemblerMIPSShared::bind; 166 | 167 | void bind(RepatchLabel* label); 168 | static void Bind(uint8_t* rawCode, const CodeLabel& label); 169 | 170 | void processCodeLabels(uint8_t* rawCode); 171 | 172 | static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader); 173 | static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader); 174 | 175 | void bind(InstImm* inst, uintptr_t branch, uintptr_t target); 176 | 177 | // Copy the assembly code to the given buffer, and perform any pending 178 | // relocations relying on the target address. 
    void executableCopy(uint8_t* buffer, bool flushICache = true);

    static uint32_t PatchWrite_NearCallSize();

    // Read back / rewrite / emit from scratch the instruction sequence that
    // presumably materializes a 64-bit immediate into |reg| — confirm the
    // exact encoding against the definitions in Assembler-mips64.cpp.
    static uint64_t ExtractLoad64Value(Instruction* inst0);
    static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
    static void WriteLoad64Instructions(Instruction* inst0, Register reg, uint64_t value);


    static void PatchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall);
    // Patch a pointer-sized datum at |label|; |expectedValue| is the value
    // assumed to currently be there (NOTE(review): presumably verified by the
    // implementation — confirm in Assembler-mips64.cpp).
    static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
                                        ImmPtr expectedValue);
    static void PatchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue,
                                        PatchedImmPtr expectedValue);

    static uint64_t ExtractInstructionImmediate(uint8_t* code);

    static void ToggleCall(CodeLocationLabel inst_, bool enabled);
}; // Assembler

// Number of argument slots passed in registers; float slots mirror the
// integer slots one-for-one.
static const uint32_t NumIntArgRegs = 8;
static const uint32_t NumFloatArgRegs = NumIntArgRegs;

// Set *out to the integer register carrying argument slot |usedArgSlots|
// (the slot index is simply added to a0's register code) and return true,
// or return false when that slot is passed on the stack instead.
static inline bool
GetIntArgReg(uint32_t usedArgSlots, Register* out)
{
    if (usedArgSlots < NumIntArgRegs) {
        *out = Register::FromCode(a0.code() + usedArgSlots);
        return true;
    }
    return false;
}

// Float counterpart of GetIntArgReg: registers start at f12, one per slot.
static inline bool
GetFloatArgReg(uint32_t usedArgSlots, FloatRegister* out)
{
    if (usedArgSlots < NumFloatArgRegs) {
        *out = FloatRegister::FromCode(f12.code() + usedArgSlots);
        return true;
    }
    return false;
}

// Get a register in which we plan to put a quantity that will be used as an
// integer argument. This differs from GetIntArgReg in that if we have no more
// actual argument registers to use we will fall back on using whatever
// CallTempReg* don't overlap the argument registers, and only fail once those
// run out too.
static inline bool
GetTempRegForIntArg(uint32_t usedIntArgs, uint32_t usedFloatArgs, Register* out)
{
    // NOTE: We can't properly determine which regs are used if there are
    // float arguments. If this is needed, we will have to guess.
    MOZ_ASSERT(usedFloatArgs == 0);

    if (GetIntArgReg(usedIntArgs, out))
        return true;
    // Unfortunately, we have to assume things about the point at which
    // GetIntArgReg returns false, because we need to know how many registers it
    // can allocate.
    usedIntArgs -= NumIntArgRegs;
    if (usedIntArgs >= NumCallTempNonArgRegs)
        return false;
    *out = CallTempNonArgRegs[usedIntArgs];
    return true;
}

// Byte displacement, within the stack-argument area, of argument slot
// |usedArgSlots|; only slots past the register-passed ones live on the
// stack, and each occupies eight bytes.
static inline uint32_t
GetArgStackDisp(uint32_t usedArgSlots)
{
    MOZ_ASSERT(usedArgSlots >= NumIntArgRegs);
    return (usedArgSlots - NumIntArgRegs) * sizeof(int64_t);
}

} // namespace jit
} // namespace js

#endif /* jit_mips64_Assembler_mips64_h */
--------------------------------------------------------------------------------
/AtomicOperations-mips-shared.h:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* For documentation, see jit/AtomicOperations.h */

// NOTE, MIPS32 unlike MIPS64 doesn't provide hardware support for lock-free
// 64-bit atomics. We lie, below, about 8-byte atomics being always lock-
// free in order to support wasm jit. The 64-bit atomics for MIPS32 do not use
// __atomic intrinsics and therefore do not rely on -latomic.
// Access to a specific 64-bit variable in memory is protected by an AddressLock
// whose instance is shared between jit and AtomicOperations.

#ifndef jit_mips_shared_AtomicOperations_mips_shared_h
#define jit_mips_shared_AtomicOperations_mips_shared_h

#include "mozilla/Assertions.h"
#include "mozilla/Types.h"

#include "builtin/AtomicsObject.h"
#include "vm/ArrayBufferObject.h"

#if !defined(__clang__) && !defined(__GNUC__)
# error "This file only for gcc-compatible compilers"
#endif

#if defined(JS_SIMULATOR_MIPS32) && !defined(__i386__)
# error "The MIPS32 simulator atomics assume x86"
#endif

namespace js { namespace jit {

#if defined(JS_CODEGEN_MIPS32)

// One-word spinlock used to serialize all 64-bit accesses on MIPS32, which
// has no lock-free 8-byte atomics. acquire()/release() are defined at the
// bottom of this file.
struct AddressLock
{
  public:
    // Spin until the lock word is atomically moved from 0 to 1.
    void acquire();
    // Atomically store 0 back into the lock word, releasing the lock.
    void release();
  private:
    uint32_t spinlock;
};

static_assert(sizeof(AddressLock) == sizeof(uint32_t),
              "AddressLock must be 4 bytes for it to be consumed by jit");

// For now use a single global AddressLock.
50 | static AddressLock gAtomic64Lock; 51 | 52 | struct MOZ_RAII AddressGuard 53 | { 54 | explicit AddressGuard(void* addr) 55 | { 56 | gAtomic64Lock.acquire(); 57 | } 58 | 59 | ~AddressGuard() { 60 | gAtomic64Lock.release(); 61 | } 62 | }; 63 | 64 | #endif 65 | 66 | } } 67 | 68 | inline bool 69 | js::jit::AtomicOperations::hasAtomic8() 70 | { 71 | return true; 72 | } 73 | 74 | inline bool 75 | js::jit::AtomicOperations::isLockfree8() 76 | { 77 | MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0)); 78 | MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0)); 79 | MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0)); 80 | # if defined(JS_64BIT) 81 | MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0)); 82 | # endif 83 | return true; 84 | } 85 | 86 | inline void 87 | js::jit::AtomicOperations::fenceSeqCst() 88 | { 89 | __atomic_thread_fence(__ATOMIC_SEQ_CST); 90 | } 91 | 92 | template 93 | inline T 94 | js::jit::AtomicOperations::loadSeqCst(T* addr) 95 | { 96 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 97 | T v; 98 | __atomic_load(addr, &v, __ATOMIC_SEQ_CST); 99 | return v; 100 | } 101 | 102 | namespace js { namespace jit { 103 | 104 | #if defined(JS_CODEGEN_MIPS32) 105 | 106 | template<> 107 | inline int64_t 108 | js::jit::AtomicOperations::loadSeqCst(int64_t* addr) 109 | { 110 | AddressGuard guard(addr); 111 | return *addr; 112 | } 113 | 114 | template<> 115 | inline uint64_t 116 | js::jit::AtomicOperations::loadSeqCst(uint64_t* addr) 117 | { 118 | AddressGuard guard(addr); 119 | return *addr; 120 | } 121 | 122 | #endif 123 | 124 | } } 125 | 126 | template 127 | inline void 128 | js::jit::AtomicOperations::storeSeqCst(T* addr, T val) 129 | { 130 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 131 | __atomic_store(addr, &val, __ATOMIC_SEQ_CST); 132 | } 133 | 134 | namespace js { namespace jit { 135 | 136 | #if defined(JS_CODEGEN_MIPS32) 137 | 138 | template<> 
139 | inline void 140 | js::jit::AtomicOperations::storeSeqCst(int64_t* addr, int64_t val) 141 | { 142 | AddressGuard guard(addr); 143 | *addr = val; 144 | } 145 | 146 | template<> 147 | inline void 148 | js::jit::AtomicOperations::storeSeqCst(uint64_t* addr, uint64_t val) 149 | { 150 | AddressGuard guard(addr); 151 | *addr = val; 152 | } 153 | 154 | #endif 155 | 156 | } } 157 | 158 | template 159 | inline T 160 | js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval) 161 | { 162 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 163 | __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); 164 | return oldval; 165 | } 166 | 167 | namespace js { namespace jit { 168 | 169 | #if defined(JS_CODEGEN_MIPS32) 170 | 171 | template<> 172 | inline int64_t 173 | js::jit::AtomicOperations::compareExchangeSeqCst(int64_t* addr, int64_t oldval, int64_t newval) 174 | { 175 | AddressGuard guard(addr); 176 | int64_t val = *addr; 177 | if (val == oldval) 178 | *addr = newval; 179 | return val; 180 | } 181 | 182 | template<> 183 | inline uint64_t 184 | js::jit::AtomicOperations::compareExchangeSeqCst(uint64_t* addr, uint64_t oldval, uint64_t newval) 185 | { 186 | AddressGuard guard(addr); 187 | uint64_t val = *addr; 188 | if (val == oldval) 189 | *addr = newval; 190 | return val; 191 | } 192 | 193 | #endif 194 | 195 | } } 196 | 197 | template 198 | inline T 199 | js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) 200 | { 201 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 202 | return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST); 203 | } 204 | 205 | namespace js { namespace jit { 206 | 207 | #if defined(JS_CODEGEN_MIPS32) 208 | 209 | template<> 210 | inline int64_t 211 | js::jit::AtomicOperations::fetchAddSeqCst(int64_t* addr, int64_t val) 212 | { 213 | AddressGuard guard(addr); 214 | int64_t old = *addr; 215 | *addr = old + val; 216 
| return old; 217 | } 218 | 219 | template<> 220 | inline uint64_t 221 | js::jit::AtomicOperations::fetchAddSeqCst(uint64_t* addr, uint64_t val) 222 | { 223 | AddressGuard guard(addr); 224 | uint64_t old = *addr; 225 | *addr = old + val; 226 | return old; 227 | } 228 | 229 | #endif 230 | 231 | } } 232 | 233 | template 234 | inline T 235 | js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) 236 | { 237 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 238 | return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST); 239 | } 240 | 241 | namespace js { namespace jit { 242 | 243 | #if defined(JS_CODEGEN_MIPS32) 244 | 245 | template<> 246 | inline int64_t 247 | js::jit::AtomicOperations::fetchSubSeqCst(int64_t* addr, int64_t val) 248 | { 249 | AddressGuard guard(addr); 250 | int64_t old = *addr; 251 | *addr = old - val; 252 | return old; 253 | } 254 | 255 | template<> 256 | inline uint64_t 257 | js::jit::AtomicOperations::fetchSubSeqCst(uint64_t* addr, uint64_t val) 258 | { 259 | AddressGuard guard(addr); 260 | uint64_t old = *addr; 261 | *addr = old - val; 262 | return old; 263 | } 264 | 265 | #endif 266 | 267 | } } 268 | 269 | template 270 | inline T 271 | js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) 272 | { 273 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 274 | return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST); 275 | } 276 | 277 | 278 | namespace js { namespace jit { 279 | 280 | #if defined(JS_CODEGEN_MIPS32) 281 | 282 | template<> 283 | inline int64_t 284 | js::jit::AtomicOperations::fetchAndSeqCst(int64_t* addr, int64_t val) 285 | { 286 | AddressGuard guard(addr); 287 | int64_t old = *addr; 288 | *addr = old & val; 289 | return old; 290 | } 291 | 292 | template<> 293 | inline uint64_t 294 | js::jit::AtomicOperations::fetchAndSeqCst(uint64_t* addr, uint64_t val) 295 | { 296 | AddressGuard guard(addr); 297 | uint64_t old = *addr; 298 | *addr = old & val; 299 | 
return old; 300 | } 301 | 302 | #endif 303 | 304 | } } 305 | 306 | template 307 | inline T 308 | js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) 309 | { 310 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 311 | return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST); 312 | } 313 | 314 | namespace js { namespace jit { 315 | 316 | #if defined(JS_CODEGEN_MIPS32) 317 | 318 | template<> 319 | inline int64_t 320 | js::jit::AtomicOperations::fetchOrSeqCst(int64_t* addr, int64_t val) 321 | { 322 | AddressGuard guard(addr); 323 | int64_t old = *addr; 324 | *addr = old | val; 325 | return old; 326 | } 327 | 328 | template<> 329 | inline uint64_t 330 | js::jit::AtomicOperations::fetchOrSeqCst(uint64_t* addr, uint64_t val) 331 | { 332 | AddressGuard guard(addr); 333 | uint64_t old = *addr; 334 | *addr = old | val; 335 | return old; 336 | } 337 | 338 | #endif 339 | 340 | } } 341 | 342 | template 343 | inline T 344 | js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) 345 | { 346 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 347 | return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST); 348 | 349 | } 350 | 351 | namespace js { namespace jit { 352 | 353 | #if defined(JS_CODEGEN_MIPS32) 354 | 355 | template<> 356 | inline int64_t 357 | js::jit::AtomicOperations::fetchXorSeqCst(int64_t* addr, int64_t val) 358 | { 359 | AddressGuard guard(addr); 360 | int64_t old = *addr; 361 | *addr = old ^ val; 362 | return old; 363 | } 364 | 365 | template<> 366 | inline uint64_t 367 | js::jit::AtomicOperations::fetchXorSeqCst(uint64_t* addr, uint64_t val) 368 | { 369 | AddressGuard guard(addr); 370 | uint64_t old = *addr; 371 | *addr = old ^ val; 372 | return old; 373 | } 374 | 375 | #endif 376 | 377 | } } 378 | 379 | template 380 | inline T 381 | js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) 382 | { 383 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 
384 | T v; 385 | __atomic_load(addr, &v, __ATOMIC_RELAXED); 386 | return v; 387 | } 388 | 389 | namespace js { namespace jit { 390 | 391 | #if defined(JS_CODEGEN_MIPS32) 392 | 393 | template<> 394 | inline int64_t 395 | js::jit::AtomicOperations::loadSafeWhenRacy(int64_t* addr) 396 | { 397 | return *addr; 398 | } 399 | 400 | template<> 401 | inline uint64_t 402 | js::jit::AtomicOperations::loadSafeWhenRacy(uint64_t* addr) 403 | { 404 | return *addr; 405 | } 406 | 407 | #endif 408 | 409 | template<> 410 | inline uint8_clamped 411 | js::jit::AtomicOperations::loadSafeWhenRacy(uint8_clamped* addr) 412 | { 413 | uint8_t v; 414 | __atomic_load(&addr->val, &v, __ATOMIC_RELAXED); 415 | return uint8_clamped(v); 416 | } 417 | 418 | template<> 419 | inline float 420 | js::jit::AtomicOperations::loadSafeWhenRacy(float* addr) 421 | { 422 | return *addr; 423 | } 424 | 425 | template<> 426 | inline double 427 | js::jit::AtomicOperations::loadSafeWhenRacy(double* addr) 428 | { 429 | return *addr; 430 | } 431 | 432 | } } 433 | 434 | template 435 | inline void 436 | js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) 437 | { 438 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 439 | __atomic_store(addr, &val, __ATOMIC_RELAXED); 440 | } 441 | 442 | namespace js { namespace jit { 443 | 444 | #if defined(JS_CODEGEN_MIPS32) 445 | 446 | template<> 447 | inline void 448 | js::jit::AtomicOperations::storeSafeWhenRacy(int64_t* addr, int64_t val) 449 | { 450 | *addr = val; 451 | } 452 | 453 | template<> 454 | inline void 455 | js::jit::AtomicOperations::storeSafeWhenRacy(uint64_t* addr, uint64_t val) 456 | { 457 | *addr = val; 458 | } 459 | 460 | #endif 461 | 462 | template<> 463 | inline void 464 | js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr, uint8_clamped val) 465 | { 466 | __atomic_store(&addr->val, &val.val, __ATOMIC_RELAXED); 467 | } 468 | 469 | template<> 470 | inline void 471 | 
js::jit::AtomicOperations::storeSafeWhenRacy(float* addr, float val) 472 | { 473 | *addr = val; 474 | } 475 | 476 | template<> 477 | inline void 478 | js::jit::AtomicOperations::storeSafeWhenRacy(double* addr, double val) 479 | { 480 | *addr = val; 481 | } 482 | 483 | } } 484 | 485 | inline void 486 | js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes) 487 | { 488 | MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest+nbytes)); 489 | MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src+nbytes)); 490 | ::memcpy(dest, src, nbytes); 491 | } 492 | 493 | inline void 494 | js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes) 495 | { 496 | ::memmove(dest, src, nbytes); 497 | } 498 | 499 | template 500 | inline T 501 | js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) 502 | { 503 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 504 | T v; 505 | __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST); 506 | return v; 507 | } 508 | 509 | namespace js { namespace jit { 510 | 511 | #if defined(JS_CODEGEN_MIPS32) 512 | 513 | template<> 514 | inline int64_t 515 | js::jit::AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val) 516 | { 517 | AddressGuard guard(addr); 518 | int64_t old = *addr; 519 | *addr = val; 520 | return old; 521 | } 522 | 523 | template<> 524 | inline uint64_t 525 | js::jit::AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val) 526 | { 527 | AddressGuard guard(addr); 528 | uint64_t old = *addr; 529 | *addr = val; 530 | return old; 531 | } 532 | 533 | #endif 534 | 535 | } } 536 | 537 | #if defined(JS_CODEGEN_MIPS32) 538 | 539 | inline void 540 | js::jit::AddressLock::acquire() 541 | { 542 | uint32_t zero = 0; 543 | uint32_t one = 1; 544 | while (!__atomic_compare_exchange(&spinlock, &zero, &one, true, __ATOMIC_SEQ_CST, 545 | __ATOMIC_SEQ_CST)) 546 | { 547 | zero = 0; 548 | } 549 | } 550 | 551 | 
inline void 552 | js::jit::AddressLock::release() 553 | { 554 | uint32_t zero = 0; 555 | __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST); 556 | } 557 | 558 | #endif 559 | 560 | #endif // jit_mips_shared_AtomicOperations_mips_shared_h 561 | -------------------------------------------------------------------------------- /AtomicOperations-ppc64le.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | /* For documentation, see jit/AtomicOperations.h */ 8 | 9 | #ifndef jit_ppc64le_AtomicOperations_ppc64le_h 10 | #define jit_ppc64le_AtomicOperations_ppc64le_h 11 | 12 | #include "mozilla/Assertions.h" 13 | #include "mozilla/Types.h" 14 | 15 | #include "builtin/AtomicsObject.h" 16 | #include "vm/ArrayBufferObject.h" 17 | 18 | #if !defined(__clang__) && !defined(__GNUC__) 19 | # error "This file only for gcc-compatible compilers" 20 | #endif 21 | 22 | inline bool 23 | js::jit::AtomicOperations::hasAtomic8() 24 | { 25 | return true; // XXX? 
26 | } 27 | 28 | inline bool 29 | js::jit::AtomicOperations::isLockfree8() 30 | { 31 | MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0)); 32 | MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0)); 33 | MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0)); 34 | MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0)); 35 | return true; 36 | } 37 | 38 | inline void 39 | js::jit::AtomicOperations::fenceSeqCst() 40 | { 41 | __atomic_thread_fence(__ATOMIC_SEQ_CST); 42 | } 43 | 44 | template 45 | inline T 46 | js::jit::AtomicOperations::loadSeqCst(T* addr) 47 | { 48 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 49 | T v; 50 | __atomic_load(addr, &v, __ATOMIC_SEQ_CST); 51 | return v; 52 | } 53 | 54 | template 55 | inline void 56 | js::jit::AtomicOperations::storeSeqCst(T* addr, T val) 57 | { 58 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 59 | __atomic_store(addr, &val, __ATOMIC_SEQ_CST); 60 | } 61 | 62 | template 63 | inline T 64 | js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval) 65 | { 66 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 67 | __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); 68 | return oldval; 69 | } 70 | 71 | template 72 | inline T 73 | js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) 74 | { 75 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 76 | return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST); 77 | } 78 | 79 | template 80 | inline T 81 | js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) 82 | { 83 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 84 | return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST); 85 | } 86 | 87 | template 88 | inline T 89 | js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) 90 | { 91 | 
static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 92 | return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST); 93 | } 94 | 95 | template 96 | inline T 97 | js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) 98 | { 99 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 100 | return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST); 101 | } 102 | 103 | template 104 | inline T 105 | js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) 106 | { 107 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 108 | return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST); 109 | 110 | } 111 | 112 | template 113 | inline T 114 | js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) 115 | { 116 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 117 | T v; 118 | __atomic_load(addr, &v, __ATOMIC_RELAXED); 119 | return v; 120 | } 121 | 122 | template<> 123 | inline uint8_clamped 124 | js::jit::AtomicOperations::loadSafeWhenRacy(uint8_clamped* addr) 125 | { 126 | uint8_t v; 127 | __atomic_load(&addr->val, &v, __ATOMIC_RELAXED); 128 | return uint8_clamped(v); 129 | } 130 | 131 | template<> 132 | inline float 133 | js::jit::AtomicOperations::loadSafeWhenRacy(float* addr) 134 | { 135 | return *addr; 136 | } 137 | 138 | template<> 139 | inline double 140 | js::jit::AtomicOperations::loadSafeWhenRacy(double* addr) 141 | { 142 | return *addr; 143 | } 144 | 145 | } } 146 | 147 | template 148 | inline void 149 | js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) 150 | { 151 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 152 | __atomic_store(addr, &val, __ATOMIC_RELAXED); 153 | } 154 | 155 | template<> 156 | inline void 157 | js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr, uint8_clamped val) 158 | { 159 | __atomic_store(&addr->val, &val.val, __ATOMIC_RELAXED); 160 | } 
161 | 162 | template<> 163 | inline void 164 | js::jit::AtomicOperations::storeSafeWhenRacy(float* addr, float val) 165 | { 166 | *addr = val; 167 | } 168 | 169 | template<> 170 | inline void 171 | js::jit::AtomicOperations::storeSafeWhenRacy(double* addr, double val) 172 | { 173 | *addr = val; 174 | } 175 | 176 | inline void 177 | js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes) 178 | { 179 | MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest+nbytes)); 180 | MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src+nbytes)); 181 | ::memcpy(dest, src, nbytes); 182 | } 183 | 184 | inline void 185 | js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes) 186 | { 187 | ::memmove(dest, src, nbytes); 188 | } 189 | 190 | template 191 | inline T 192 | js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) 193 | { 194 | static_assert(sizeof(T) <= sizeof(void*), "atomics supported up to pointer size only"); 195 | T v; 196 | __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST); 197 | return v; 198 | } 199 | 200 | #endif // jit_ppc64le_AtomicOperations_ppc64le_h 201 | -------------------------------------------------------------------------------- /Bailouts-mips-shared.cpp: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 6 | 7 | #include "jit/Bailouts.h" 8 | 9 | using namespace js; 10 | using namespace js::jit; 11 | 12 | BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations, 13 | InvalidationBailoutStack* bailout) 14 | : machine_(bailout->machine()) 15 | { 16 | framePointer_ = (uint8_t*) bailout->fp(); 17 | topFrameSize_ = framePointer_ - bailout->sp(); 18 | topIonScript_ = bailout->ionScript(); 19 | attachOnJitActivation(activations); 20 | 21 | uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress(); 22 | const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_); 23 | snapshotOffset_ = osiIndex->snapshotOffset(); 24 | } 25 | -------------------------------------------------------------------------------- /Bailouts-mips64.cpp: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 6 | 7 | #include "jit/mips64/Bailouts-mips64.h" 8 | 9 | #include "vm/JSContext.h" 10 | #include "vm/Realm.h" 11 | 12 | using namespace js; 13 | using namespace js::jit; 14 | 15 | BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations, 16 | BailoutStack* bailout) 17 | : machine_(bailout->machineState()) 18 | { 19 | uint8_t* sp = bailout->parentStackPointer(); 20 | framePointer_ = sp + bailout->frameSize(); 21 | topFrameSize_ = framePointer_ - sp; 22 | 23 | JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken()); 24 | topIonScript_ = script->ionScript(); 25 | 26 | attachOnJitActivation(activations); 27 | snapshotOffset_ = bailout->snapshotOffset(); 28 | } 29 | -------------------------------------------------------------------------------- /Bailouts-mips64.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/

#ifndef jit_mips64_Bailouts_mips64_h
#define jit_mips64_Bailouts_mips64_h

#include "jit/Bailouts.h"
#include "jit/JitRealm.h"

namespace js {
namespace jit {

// Register dump plus frame metadata consumed by BailoutFrameInfo.
// NOTE(review): presumably laid down by the bailout trampoline directly
// below the parent frame — parentStackPointer() depends on that; confirm
// against Trampoline-mips64.cpp.
class BailoutStack
{
    RegisterDump::FPUArray fpregs_;
    RegisterDump::GPRArray regs_;
    uintptr_t frameSize_;
    uintptr_t snapshotOffset_;

  public:
    // Reconstruct the machine state from the dumped GP and FP registers.
    MachineState machineState() {
        return MachineState::FromBailout(regs_, fpregs_);
    }
    uint32_t snapshotOffset() const {
        return snapshotOffset_;
    }
    uint32_t frameSize() const {
        return frameSize_;
    }
    // First byte past this record, i.e. the stack pointer of the frame that
    // was active when the dump was made.
    uint8_t* parentStackPointer() {
        return (uint8_t*)this + sizeof(BailoutStack);
    }
    static size_t offsetOfFrameSize() {
        return offsetof(BailoutStack, frameSize_);
    }
};

} // namespace jit
} // namespace js

#endif /* jit_mips64_Bailouts_mips64_h */
--------------------------------------------------------------------------------
/Bailouts-ppc64le.cpp:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

#include "jit/Bailouts.h"
#include "jit/ppc64le/Bailouts-ppc64le.h"

#include "vm/JSContext.h"
#include "vm/Realm.h"

using namespace js;
using namespace js::jit;

// Ordinary bailout: unpack the BailoutStack record pushed on the stack. The
// frame pointer is recovered from the parent stack pointer plus the recorded
// frame size, and the bailed-out script is identified through the callee
// token stored in the frame's JitFrameLayout.
BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
                                   BailoutStack* bailout)
  : machine_(bailout->machineState())
{
    uint8_t* sp = bailout->parentStackPointer();
    framePointer_ = sp + bailout->frameSize();
    topFrameSize_ = framePointer_ - sp;

    JSScript* script = ScriptFromCalleeToken(((JitFrameLayout*) framePointer_)->calleeToken());
    topIonScript_ = script->ionScript();

    attachOnJitActivation(activations);
    snapshotOffset_ = bailout->snapshotOffset();
}

// Invalidation bailout: here the snapshot offset is not stored in the stack
// record; it is looked up in the IonScript's OSI index, keyed by the OSI
// point's return address.
BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
                                   InvalidationBailoutStack* bailout)
  : machine_(bailout->machine())
{
    framePointer_ = (uint8_t*) bailout->fp();
    topFrameSize_ = framePointer_ - bailout->sp();
    topIonScript_ = bailout->ionScript();
    attachOnJitActivation(activations);

    uint8_t* returnAddressToFp_ = bailout->osiPointReturnAddress();
    const OsiIndex* osiIndex = topIonScript_->getOsiIndex(returnAddressToFp_);
    snapshotOffset_ = osiIndex->snapshotOffset();
}
--------------------------------------------------------------------------------
/Bailouts-ppc64le.h:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/

#ifndef jit_ppc64le_Bailouts_ppc64le_h
#define jit_ppc64le_Bailouts_ppc64le_h

#include "jit/Bailouts.h"
#include "jit/JitRealm.h"

namespace js {
namespace jit {

// View of the stack region handed to the generic bailout machinery on PPC64LE:
// a full register dump followed by the bailing frame's size and the offset of
// its recovery snapshot.  Identical in shape to the MIPS64 version.
// NOTE(review): field order presumably mirrors the push order used by the
// bailout trampoline (see Trampoline-ppc64le.cpp) -- confirm before reordering.
class BailoutStack
{
    RegisterDump::FPUArray fpregs_;   // Saved floating-point registers.
    RegisterDump::GPRArray regs_;     // Saved general-purpose registers.
    uintptr_t frameSize_;             // Size in bytes of the bailing frame.
    uintptr_t snapshotOffset_;        // Offset of the recovery snapshot.

  public:
    // Bundle the saved register arrays into a MachineState for value recovery.
    MachineState machineState() {
        return MachineState::FromBailout(regs_, fpregs_);
    }
    // NOTE(review): stored as uintptr_t but returned as uint32_t; assumes the
    // value fits in 32 bits.
    uint32_t snapshotOffset() const {
        return snapshotOffset_;
    }
    uint32_t frameSize() const {
        return frameSize_;
    }
    // Address just past this structure, i.e. the parent frame's stack pointer.
    uint8_t* parentStackPointer() {
        return (uint8_t*)this + sizeof(BailoutStack);
    }
    // Lets generated code address frameSize_ directly.
    static size_t offsetOfFrameSize() {
        return offsetof(BailoutStack, frameSize_);
    }
};

} // namespace jit
} // namespace js

#endif /* jit_ppc64le_Bailouts_ppc64le_h */
--------------------------------------------------------------------------------
/BaselineCompiler-mips-shared.cpp:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/

#include "jit/mips-shared/BaselineCompiler-mips-shared.h"

using namespace js;
using namespace js::jit;

// Forwarding constructor: the MIPS32/MIPS64 shared baseline compiler adds no
// state of its own on top of the platform-independent BaselineCompilerShared.
BaselineCompilerMIPSShared::BaselineCompilerMIPSShared(JSContext* cx, TempAllocator& alloc,
                                                       JSScript* script)
  : BaselineCompilerShared(cx, alloc, script)
{
}
--------------------------------------------------------------------------------
/BaselineCompiler-mips-shared.h:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_mips_shared_BaselineCompiler_mips_shared_h
#define jit_mips_shared_BaselineCompiler_mips_shared_h

#include "jit/shared/BaselineCompiler-shared.h"

namespace js {
namespace jit {

// Baseline-compiler layer shared between the MIPS32 and MIPS64 ports; the
// per-word-size subclasses (e.g. BaselineCompilerMIPS64) derive from this.
class BaselineCompilerMIPSShared : public BaselineCompilerShared
{
  protected:
    BaselineCompilerMIPSShared(JSContext* cx, TempAllocator& alloc, JSScript* script);
};

} // namespace jit
} // namespace js

#endif /* jit_mips_shared_BaselineCompiler_mips_shared_h */
--------------------------------------------------------------------------------
/BaselineCompiler-mips64.cpp:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/

#include "jit/mips64/BaselineCompiler-mips64.h"

using namespace js;
using namespace js::jit;

// Forwarding constructor: MIPS64 adds no baseline-compiler state beyond the
// shared MIPS layer.
BaselineCompilerMIPS64::BaselineCompilerMIPS64(JSContext* cx, TempAllocator& alloc,
                                               JSScript* script)
  : BaselineCompilerMIPSShared(cx, alloc, script)
{
}
--------------------------------------------------------------------------------
/BaselineCompiler-mips64.h:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_mips64_BaselineCompiler_mips64_h
#define jit_mips64_BaselineCompiler_mips64_h

#include "jit/mips-shared/BaselineCompiler-mips-shared.h"

namespace js {
namespace jit {

// MIPS64 baseline compiler; currently just the shared MIPS implementation.
class BaselineCompilerMIPS64 : public BaselineCompilerMIPSShared
{
  protected:
    BaselineCompilerMIPS64(JSContext* cx, TempAllocator& alloc, JSScript* script);
};

// Platform-independent code selects the backend through this alias.
typedef BaselineCompilerMIPS64 BaselineCompilerSpecific;

} // namespace jit
} // namespace js

#endif /* jit_mips64_BaselineCompiler_mips64_h */
--------------------------------------------------------------------------------
/BaselineCompiler-ppc64le.cpp:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/

#include "jit/ppc64le/BaselineCompiler-ppc64le.h"

using namespace js;
using namespace js::jit;

// Forwarding constructor: PPC64LE derives directly from the shared baseline
// compiler (no intermediate shared layer, unlike the MIPS ports).
BaselineCompilerPPC64LE::BaselineCompilerPPC64LE(JSContext* cx, TempAllocator& alloc,
                                                 JSScript* script)
  : BaselineCompilerShared(cx, alloc, script)
{
}
--------------------------------------------------------------------------------
/BaselineCompiler-ppc64le.h:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_ppc64le_BaselineCompiler_ppc64le_h
#define jit_ppc64le_BaselineCompiler_ppc64le_h

#include "jit/shared/BaselineCompiler-shared.h"

namespace js {
namespace jit {

// PPC64LE baseline compiler; currently adds nothing beyond the shared layer.
class BaselineCompilerPPC64LE : public BaselineCompilerShared
{
  protected:
    BaselineCompilerPPC64LE(JSContext* cx, TempAllocator& alloc, JSScript* script);
};

// Platform-independent code selects the backend through this alias.
typedef BaselineCompilerPPC64LE BaselineCompilerSpecific;

} // namespace jit
} // namespace js

#endif /* jit_ppc64le_BaselineCompiler_ppc64le_h */
--------------------------------------------------------------------------------
/BaselineIC-mips-shared.cpp:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ 6 | 7 | #include "jit/BaselineCompiler.h" 8 | #include "jit/BaselineIC.h" 9 | #include "jit/BaselineJIT.h" 10 | #include "jit/Linker.h" 11 | #include "jit/SharedICHelpers.h" 12 | 13 | using namespace js; 14 | using namespace js::jit; 15 | 16 | namespace js { 17 | namespace jit { 18 | 19 | // ICCompare_Int32 20 | 21 | bool 22 | ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm) 23 | { 24 | // Guard that R0 is an integer and R1 is an integer. 25 | Label failure; 26 | Label conditionTrue; 27 | masm.branchTestInt32(Assembler::NotEqual, R0, &failure); 28 | masm.branchTestInt32(Assembler::NotEqual, R1, &failure); 29 | 30 | // Compare payload regs of R0 and R1. 31 | masm.unboxInt32(R0, ExtractTemp0); 32 | masm.unboxInt32(R1, ExtractTemp1); 33 | Assembler::Condition cond = JSOpToCondition(op, /* signed = */true); 34 | masm.ma_cmp_set(R0.valueReg(), ExtractTemp0, ExtractTemp1, cond); 35 | 36 | masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.valueReg(), R0); 37 | EmitReturnFromIC(masm); 38 | 39 | // Failure case - jump to next stub 40 | masm.bind(&failure); 41 | EmitStubGuardFailure(masm); 42 | 43 | return true; 44 | } 45 | 46 | } // namespace jit 47 | } // namespace js 48 | -------------------------------------------------------------------------------- /BaselineIC-mips64.cpp: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 6 | 7 | #include "jit/BaselineCompiler.h" 8 | #include "jit/BaselineIC.h" 9 | #include "jit/BaselineJIT.h" 10 | #include "jit/Linker.h" 11 | #include "jit/SharedICHelpers.h" 12 | 13 | using namespace js; 14 | using namespace js::jit; 15 | 16 | namespace js { 17 | namespace jit { 18 | 19 | // ICCompare_Int32 20 | 21 | bool 22 | ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm) 23 | { 24 | // Guard that R0 is an integer and R1 is an integer. 25 | Label failure; 26 | Label conditionTrue; 27 | masm.branchTestInt32(Assembler::NotEqual, R0, &failure); 28 | masm.branchTestInt32(Assembler::NotEqual, R1, &failure); 29 | 30 | // Compare payload regs of R0 and R1. 31 | masm.unboxInt32(R0, ExtractTemp0); 32 | masm.unboxInt32(R1, ExtractTemp1); 33 | Assembler::Condition cond = JSOpToCondition(op, /* signed = */true); 34 | masm.ma_cmp_set(R0.valueReg(), ExtractTemp0, ExtractTemp1, cond); 35 | 36 | masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.valueReg(), R0); 37 | EmitReturnFromIC(masm); 38 | 39 | // Failure case - jump to next stub 40 | masm.bind(&failure); 41 | EmitStubGuardFailure(masm); 42 | 43 | return true; 44 | } 45 | 46 | } // namespace jit 47 | } // namespace js 48 | -------------------------------------------------------------------------------- /BaselineIC-ppc64le.cpp: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 6 | 7 | #include "jit/BaselineCompiler.h" 8 | #include "jit/BaselineIC.h" 9 | #include "jit/BaselineJIT.h" 10 | #include "jit/Linker.h" 11 | #include "jit/SharedICHelpers.h" 12 | 13 | using namespace js; 14 | using namespace js::jit; 15 | 16 | namespace js { 17 | namespace jit { 18 | 19 | // ICCompare_Int32 20 | 21 | bool 22 | ICCompare_Int32::Compiler::generateStubCode(MacroAssembler& masm) 23 | { 24 | // Guard that R0 is an integer and R1 is an integer. 25 | Label failure; 26 | Label conditionTrue; 27 | masm.branchTestInt32(Assembler::NotEqual, R0, &failure); 28 | masm.branchTestInt32(Assembler::NotEqual, R1, &failure); 29 | 30 | // Compare payload regs of R0 and R1. 31 | masm.unboxInt32(R0, ExtractTemp0); 32 | masm.unboxInt32(R1, ExtractTemp1); 33 | Assembler::Condition cond = JSOpToCondition(op, /* signed = */true); 34 | masm.ma_cmp_set(R0.valueReg(), ExtractTemp0, ExtractTemp1, cond); 35 | 36 | masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.valueReg(), R0); 37 | EmitReturnFromIC(masm); 38 | 39 | // Failure case - jump to next stub 40 | masm.bind(&failure); 41 | EmitStubGuardFailure(masm); 42 | 43 | return true; 44 | } 45 | 46 | } // namespace jit 47 | } // namespace js 48 | -------------------------------------------------------------------------------- /CodeGenerator-mips-shared.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 6 | 7 | #ifndef jit_mips_shared_CodeGenerator_mips_shared_h 8 | #define jit_mips_shared_CodeGenerator_mips_shared_h 9 | 10 | #include "jit/shared/CodeGenerator-shared.h" 11 | 12 | namespace js { 13 | namespace jit { 14 | 15 | class CodeGeneratorMIPSShared; 16 | class OutOfLineBailout; 17 | class OutOfLineTableSwitch; 18 | 19 | using OutOfLineWasmTruncateCheck = OutOfLineWasmTruncateCheckBase; 20 | 21 | class CodeGeneratorMIPSShared : public CodeGeneratorShared 22 | { 23 | friend class MoveResolverMIPS; 24 | 25 | protected: 26 | CodeGeneratorMIPSShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm); 27 | 28 | NonAssertingLabel deoptLabel_; 29 | 30 | Operand ToOperand(const LAllocation& a); 31 | Operand ToOperand(const LAllocation* a); 32 | Operand ToOperand(const LDefinition* def); 33 | 34 | #ifdef JS_PUNBOX64 35 | Operand ToOperandOrRegister64(const LInt64Allocation input); 36 | #else 37 | Register64 ToOperandOrRegister64(const LInt64Allocation input); 38 | #endif 39 | 40 | MoveOperand toMoveOperand(LAllocation a) const; 41 | 42 | template 43 | void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) { 44 | Label bail; 45 | masm.branch32(c, lhs, rhs, &bail); 46 | bailoutFrom(&bail, snapshot); 47 | } 48 | template 49 | void bailoutTest32(Assembler::Condition c, Register lhs, T rhs, LSnapshot* snapshot) { 50 | Label bail; 51 | masm.branchTest32(c, lhs, rhs, &bail); 52 | bailoutFrom(&bail, snapshot); 53 | } 54 | template 55 | void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) { 56 | Label bail; 57 | masm.branchPtr(c, lhs, rhs, &bail); 58 | bailoutFrom(&bail, snapshot); 59 | } 60 | void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs, LSnapshot* snapshot) { 61 | Label bail; 62 | masm.branchTestPtr(c, lhs, rhs, &bail); 63 | bailoutFrom(&bail, snapshot); 64 | } 65 | void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) { 66 | Label bail; 67 | masm.branchTest32(Assembler::Zero, reg, 
Imm32(0xFF), &bail); 68 | bailoutFrom(&bail, snapshot); 69 | } 70 | 71 | void bailoutFrom(Label* label, LSnapshot* snapshot); 72 | void bailout(LSnapshot* snapshot); 73 | 74 | bool generateOutOfLineCode(); 75 | 76 | template 77 | void branchToBlock(Register lhs, T rhs, MBasicBlock* mir, Assembler::Condition cond) 78 | { 79 | masm.ma_b(lhs, rhs, skipTrivialBlocks(mir)->lir()->label(), cond); 80 | } 81 | void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs, 82 | MBasicBlock* mir, Assembler::DoubleCondition cond); 83 | 84 | // Emits a branch that directs control flow to the true block if |cond| is 85 | // true, and the false block if |cond| is false. 86 | template 87 | void emitBranch(Register lhs, T rhs, Assembler::Condition cond, 88 | MBasicBlock* mirTrue, MBasicBlock* mirFalse) 89 | { 90 | if (isNextBlock(mirFalse->lir())) { 91 | branchToBlock(lhs, rhs, mirTrue, cond); 92 | } else { 93 | branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond)); 94 | jumpToBlock(mirTrue); 95 | } 96 | } 97 | void testZeroEmitBranch(Assembler::Condition cond, Register reg, 98 | MBasicBlock* ifTrue, MBasicBlock* ifFalse) 99 | { 100 | emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse); 101 | } 102 | 103 | void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base); 104 | 105 | template 106 | void emitWasmLoad(T* ins); 107 | template 108 | void emitWasmStore(T* ins); 109 | 110 | void generateInvalidateEpilogue(); 111 | 112 | // Generating a result. 113 | template 114 | void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, 115 | const T& mem, Register flagTemp, Register outTemp, 116 | Register valueTemp, Register offsetTemp, Register maskTemp, 117 | AnyRegister output); 118 | 119 | // Generating no result. 
120 | template 121 | void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, 122 | const T& mem, Register flagTemp, Register valueTemp, 123 | Register offsetTemp, Register maskTemp); 124 | 125 | public: 126 | // Out of line visitors. 127 | void visitOutOfLineBailout(OutOfLineBailout* ool); 128 | void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool); 129 | void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool); 130 | }; 131 | 132 | // An out-of-line bailout thunk. 133 | class OutOfLineBailout : public OutOfLineCodeBase 134 | { 135 | LSnapshot* snapshot_; 136 | uint32_t frameSize_; 137 | 138 | public: 139 | OutOfLineBailout(LSnapshot* snapshot, uint32_t frameSize) 140 | : snapshot_(snapshot), 141 | frameSize_(frameSize) 142 | { } 143 | 144 | void accept(CodeGeneratorMIPSShared* codegen) override; 145 | 146 | LSnapshot* snapshot() const { 147 | return snapshot_; 148 | } 149 | }; 150 | 151 | } // namespace jit 152 | } // namespace js 153 | 154 | #endif /* jit_mips_shared_CodeGenerator_mips_shared_h */ 155 | -------------------------------------------------------------------------------- /CodeGenerator-mips64.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 6 | 7 | #ifndef jit_mips64_CodeGenerator_mips64_h 8 | #define jit_mips64_CodeGenerator_mips64_h 9 | 10 | #include "jit/mips-shared/CodeGenerator-mips-shared.h" 11 | 12 | namespace js { 13 | namespace jit { 14 | 15 | class CodeGeneratorMIPS64 : public CodeGeneratorMIPSShared 16 | { 17 | protected: 18 | CodeGeneratorMIPS64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm) 19 | : CodeGeneratorMIPSShared(gen, graph, masm) 20 | { } 21 | 22 | void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value, 23 | MBasicBlock* ifTrue, MBasicBlock* ifFalse) 24 | { 25 | MOZ_ASSERT(value.valueReg() != SecondScratchReg); 26 | masm.splitTag(value.valueReg(), SecondScratchReg); 27 | emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse); 28 | } 29 | void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value, 30 | MBasicBlock* ifTrue, MBasicBlock* ifFalse) 31 | { 32 | MOZ_ASSERT(value.valueReg() != SecondScratchReg); 33 | masm.splitTag(value.valueReg(), SecondScratchReg); 34 | emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue, ifFalse); 35 | } 36 | void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value, 37 | MBasicBlock* ifTrue, MBasicBlock* ifFalse) 38 | { 39 | MOZ_ASSERT(value.valueReg() != SecondScratchReg); 40 | masm.splitTag(value.valueReg(), SecondScratchReg); 41 | emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue, ifFalse); 42 | } 43 | 44 | 45 | template 46 | void emitWasmLoadI64(T* ins); 47 | template 48 | void emitWasmStoreI64(T* ins); 49 | 50 | ValueOperand ToValue(LInstruction* ins, size_t pos); 51 | ValueOperand ToTempValue(LInstruction* ins, size_t pos); 52 | 53 | // Functions for LTestVAndBranch. 
54 | void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag); 55 | }; 56 | 57 | typedef CodeGeneratorMIPS64 CodeGeneratorSpecific; 58 | 59 | } // namespace jit 60 | } // namespace js 61 | 62 | #endif /* jit_mips64_CodeGenerator_mips64_h */ 63 | -------------------------------------------------------------------------------- /CodeGenerator-ppc64le.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_PPC64LE_CodeGenerator_PPC64LE_h 8 | #define jit_PPC64LE_CodeGenerator_PPC64LE_h 9 | 10 | #include "jit/ppc64le/Assembler-ppc64le.h" 11 | #include "jit/shared/CodeGenerator-shared.h" 12 | 13 | namespace js { 14 | namespace jit { 15 | 16 | class CodeGeneratorShared; 17 | class OutOfLineBailout; 18 | class OutOfLineTableSwitch; 19 | 20 | using OutOfLineWasmTruncateCheck = OutOfLineWasmTruncateCheckBase; 21 | 22 | class CodeGeneratorPPC64LE : public CodeGeneratorShared 23 | { 24 | friend class MoveResolverPPC64LE; 25 | 26 | protected: 27 | CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm); 28 | 29 | NonAssertingLabel deoptLabel_; 30 | 31 | Operand ToOperand(const LAllocation& a); 32 | Operand ToOperand(const LAllocation* a); 33 | Operand ToOperand(const LDefinition* def); 34 | 35 | Operand ToOperandOrRegister64(const LInt64Allocation input); 36 | 37 | MoveOperand toMoveOperand(LAllocation a) const; 38 | 39 | template 40 | void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) { 41 | Label bail; 42 | masm.branch32(c, lhs, rhs, &bail); 43 | bailoutFrom(&bail, snapshot); 44 | } 45 | template 46 | void 
bailoutTest32(Assembler::Condition c, Register lhs, T rhs, LSnapshot* snapshot) { 47 | Label bail; 48 | masm.branchTest32(c, lhs, rhs, &bail); 49 | bailoutFrom(&bail, snapshot); 50 | } 51 | template 52 | void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) { 53 | Label bail; 54 | masm.branchPtr(c, lhs, rhs, &bail); 55 | bailoutFrom(&bail, snapshot); 56 | } 57 | void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs, LSnapshot* snapshot) { 58 | Label bail; 59 | masm.branchTestPtr(c, lhs, rhs, &bail); 60 | bailoutFrom(&bail, snapshot); 61 | } 62 | void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) { 63 | Label bail; 64 | masm.branchTest32(Assembler::Zero, reg, Imm32(0xFF), &bail); 65 | bailoutFrom(&bail, snapshot); 66 | } 67 | 68 | void bailoutFrom(Label* label, LSnapshot* snapshot); 69 | void bailout(LSnapshot* snapshot); 70 | 71 | bool generateOutOfLineCode(); 72 | 73 | template 74 | void branchToBlock(Register lhs, T rhs, MBasicBlock* mir, Assembler::Condition cond) 75 | { 76 | masm.ma_b(lhs, rhs, skipTrivialBlocks(mir)->lir()->label(), cond); 77 | } 78 | void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs, 79 | MBasicBlock* mir, Assembler::DoubleCondition cond); 80 | 81 | // Emits a branch that directs control flow to the true block if |cond| is 82 | // true, and the false block if |cond| is false. 
83 | template 84 | void emitBranch(Register lhs, T rhs, Assembler::Condition cond, 85 | MBasicBlock* mirTrue, MBasicBlock* mirFalse) 86 | { 87 | if (isNextBlock(mirFalse->lir())) { 88 | branchToBlock(lhs, rhs, mirTrue, cond); 89 | } else { 90 | branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond)); 91 | jumpToBlock(mirTrue); 92 | } 93 | } 94 | void testZeroEmitBranch(Assembler::Condition cond, Register reg, 95 | MBasicBlock* ifTrue, MBasicBlock* ifFalse) 96 | { 97 | emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse); 98 | } 99 | 100 | void emitTableSwitchDispatch(MTableSwitch* mir, Register index, Register base); 101 | 102 | template 103 | void emitWasmLoad(T* ins); 104 | template 105 | void emitWasmStore(T* ins); 106 | 107 | void generateInvalidateEpilogue(); 108 | 109 | // Generating a result. 110 | template 111 | void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, 112 | const T& mem, Register flagTemp, Register outTemp, 113 | Register valueTemp, Register offsetTemp, Register maskTemp, 114 | AnyRegister output); 115 | 116 | // Generating no result. 117 | template 118 | void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, 119 | const T& mem, Register flagTemp, Register valueTemp, 120 | Register offsetTemp, Register maskTemp); 121 | 122 | public: 123 | // Out of line visitors. 
124 | void visitOutOfLineBailout(OutOfLineBailout* ool); 125 | void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool); 126 | void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool); 127 | 128 | protected: 129 | void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value, 130 | MBasicBlock* ifTrue, MBasicBlock* ifFalse) 131 | { 132 | MOZ_ASSERT(value.valueReg() != SecondScratchReg); 133 | masm.splitTag(value.valueReg(), SecondScratchReg); 134 | emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse); 135 | } 136 | void testUndefinedEmitBranch(Assembler::Condition cond, const ValueOperand& value, 137 | MBasicBlock* ifTrue, MBasicBlock* ifFalse) 138 | { 139 | MOZ_ASSERT(value.valueReg() != SecondScratchReg); 140 | masm.splitTag(value.valueReg(), SecondScratchReg); 141 | emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue, ifFalse); 142 | } 143 | void testObjectEmitBranch(Assembler::Condition cond, const ValueOperand& value, 144 | MBasicBlock* ifTrue, MBasicBlock* ifFalse) 145 | { 146 | MOZ_ASSERT(value.valueReg() != SecondScratchReg); 147 | masm.splitTag(value.valueReg(), SecondScratchReg); 148 | emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue, ifFalse); 149 | } 150 | 151 | 152 | template 153 | void emitWasmLoadI64(T* ins); 154 | template 155 | void emitWasmStoreI64(T* ins); 156 | 157 | ValueOperand ToValue(LInstruction* ins, size_t pos); 158 | ValueOperand ToTempValue(LInstruction* ins, size_t pos); 159 | 160 | // Functions for LTestVAndBranch. 161 | void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag); 162 | }; 163 | 164 | typedef CodeGeneratorPPC64LE CodeGeneratorSpecific; 165 | 166 | // An out-of-line bailout thunk. 
167 | class OutOfLineBailout : public OutOfLineCodeBase 168 | { 169 | LSnapshot* snapshot_; 170 | uint32_t frameSize_; 171 | 172 | public: 173 | OutOfLineBailout(LSnapshot* snapshot, uint32_t frameSize) 174 | : snapshot_(snapshot), 175 | frameSize_(frameSize) 176 | { } 177 | 178 | void accept(CodeGeneratorShared* codegen) override; 179 | 180 | LSnapshot* snapshot() const { 181 | return snapshot_; 182 | } 183 | }; 184 | 185 | } // namespace jit 186 | } // namespace js 187 | 188 | #endif /* jit_PPC64LE_CodeGenerator_PPC64LE_h */ 189 | -------------------------------------------------------------------------------- /LIR-mips-shared.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_mips_shared_LIR_mips_shared_h 8 | #define jit_mips_shared_LIR_mips_shared_h 9 | 10 | namespace js { 11 | namespace jit { 12 | 13 | // Convert a 32-bit unsigned integer to a double. 14 | class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> 15 | { 16 | public: 17 | LIR_HEADER(WasmUint32ToDouble) 18 | 19 | LWasmUint32ToDouble(const LAllocation& input) 20 | : LInstructionHelper(classOpcode) 21 | { 22 | setOperand(0, input); 23 | } 24 | }; 25 | 26 | // Convert a 32-bit unsigned integer to a float32. 
27 | class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> 28 | { 29 | public: 30 | LIR_HEADER(WasmUint32ToFloat32) 31 | 32 | LWasmUint32ToFloat32(const LAllocation& input) 33 | : LInstructionHelper(classOpcode) 34 | { 35 | setOperand(0, input); 36 | } 37 | }; 38 | 39 | 40 | class LDivI : public LBinaryMath<1> 41 | { 42 | public: 43 | LIR_HEADER(DivI); 44 | 45 | LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) 46 | : LBinaryMath(classOpcode) 47 | { 48 | setOperand(0, lhs); 49 | setOperand(1, rhs); 50 | setTemp(0, temp); 51 | } 52 | 53 | MDiv* mir() const { 54 | return mir_->toDiv(); 55 | } 56 | }; 57 | 58 | class LDivPowTwoI : public LInstructionHelper<1, 1, 1> 59 | { 60 | const int32_t shift_; 61 | 62 | public: 63 | LIR_HEADER(DivPowTwoI) 64 | 65 | LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp) 66 | : LInstructionHelper(classOpcode), 67 | shift_(shift) 68 | { 69 | setOperand(0, lhs); 70 | setTemp(0, temp); 71 | } 72 | 73 | const LAllocation* numerator() { 74 | return getOperand(0); 75 | } 76 | int32_t shift() const { 77 | return shift_; 78 | } 79 | MDiv* mir() const { 80 | return mir_->toDiv(); 81 | } 82 | }; 83 | 84 | class LModI : public LBinaryMath<1> 85 | { 86 | public: 87 | LIR_HEADER(ModI); 88 | 89 | LModI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& callTemp) 90 | : LBinaryMath(classOpcode) 91 | { 92 | setOperand(0, lhs); 93 | setOperand(1, rhs); 94 | setTemp(0, callTemp); 95 | } 96 | 97 | const LDefinition* callTemp() { 98 | return getTemp(0); 99 | } 100 | MMod* mir() const { 101 | return mir_->toMod(); 102 | } 103 | }; 104 | 105 | class LModPowTwoI : public LInstructionHelper<1, 1, 0> 106 | { 107 | const int32_t shift_; 108 | 109 | public: 110 | LIR_HEADER(ModPowTwoI); 111 | 112 | LModPowTwoI(const LAllocation& lhs, int32_t shift) 113 | : LInstructionHelper(classOpcode), 114 | shift_(shift) 115 | { 116 | setOperand(0, lhs); 117 | } 118 | 119 | int32_t shift() const 
{ 120 | return shift_; 121 | } 122 | MMod* mir() const { 123 | return mir_->toMod(); 124 | } 125 | }; 126 | 127 | class LModMaskI : public LInstructionHelper<1, 1, 2> 128 | { 129 | const int32_t shift_; 130 | 131 | public: 132 | LIR_HEADER(ModMaskI); 133 | 134 | LModMaskI(const LAllocation& lhs, const LDefinition& temp0, const LDefinition& temp1, 135 | int32_t shift) 136 | : LInstructionHelper(classOpcode), 137 | shift_(shift) 138 | { 139 | setOperand(0, lhs); 140 | setTemp(0, temp0); 141 | setTemp(1, temp1); 142 | } 143 | 144 | int32_t shift() const { 145 | return shift_; 146 | } 147 | MMod* mir() const { 148 | return mir_->toMod(); 149 | } 150 | }; 151 | 152 | // Takes a tableswitch with an integer to decide 153 | class LTableSwitch : public LInstructionHelper<0, 1, 2> 154 | { 155 | public: 156 | LIR_HEADER(TableSwitch); 157 | 158 | LTableSwitch(const LAllocation& in, const LDefinition& inputCopy, 159 | const LDefinition& jumpTablePointer, MTableSwitch* ins) 160 | : LInstructionHelper(classOpcode) 161 | { 162 | setOperand(0, in); 163 | setTemp(0, inputCopy); 164 | setTemp(1, jumpTablePointer); 165 | setMir(ins); 166 | } 167 | 168 | MTableSwitch* mir() const { 169 | return mir_->toTableSwitch(); 170 | } 171 | const LAllocation* index() { 172 | return getOperand(0); 173 | } 174 | const LDefinition* tempInt() { 175 | return getTemp(0); 176 | } 177 | // This is added to share the same CodeGenerator prefixes. 
178 | const LDefinition* tempPointer() { 179 | return getTemp(1); 180 | } 181 | }; 182 | 183 | // Takes a tableswitch with an integer to decide 184 | class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3> 185 | { 186 | public: 187 | LIR_HEADER(TableSwitchV); 188 | 189 | LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy, 190 | const LDefinition& floatCopy, const LDefinition& jumpTablePointer, 191 | MTableSwitch* ins) 192 | : LInstructionHelper(classOpcode) 193 | { 194 | setBoxOperand(InputValue, input); 195 | setTemp(0, inputCopy); 196 | setTemp(1, floatCopy); 197 | setTemp(2, jumpTablePointer); 198 | setMir(ins); 199 | } 200 | 201 | MTableSwitch* mir() const { 202 | return mir_->toTableSwitch(); 203 | } 204 | 205 | static const size_t InputValue = 0; 206 | 207 | const LDefinition* tempInt() { 208 | return getTemp(0); 209 | } 210 | const LDefinition* tempFloat() { 211 | return getTemp(1); 212 | } 213 | const LDefinition* tempPointer() { 214 | return getTemp(2); 215 | } 216 | }; 217 | 218 | class LMulI : public LBinaryMath<0> 219 | { 220 | public: 221 | LIR_HEADER(MulI); 222 | 223 | LMulI() 224 | : LBinaryMath(classOpcode) 225 | {} 226 | 227 | MMul* mir() { 228 | return mir_->toMul(); 229 | } 230 | }; 231 | 232 | class LUDivOrMod : public LBinaryMath<0> 233 | { 234 | public: 235 | LIR_HEADER(UDivOrMod); 236 | 237 | LUDivOrMod() 238 | : LBinaryMath(classOpcode) 239 | {} 240 | 241 | MBinaryArithInstruction* mir() const { 242 | MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); 243 | return static_cast(mir_); 244 | } 245 | 246 | bool canBeDivideByZero() const { 247 | if (mir_->isMod()) 248 | return mir_->toMod()->canBeDivideByZero(); 249 | return mir_->toDiv()->canBeDivideByZero(); 250 | } 251 | 252 | bool trapOnError() const { 253 | if (mir_->isMod()) 254 | return mir_->toMod()->trapOnError(); 255 | return mir_->toDiv()->trapOnError(); 256 | } 257 | 258 | wasm::BytecodeOffset bytecodeOffset() const { 259 | MOZ_ASSERT(mir_->isDiv() || 
mir_->isMod()); 260 | if (mir_->isMod()) 261 | return mir_->toMod()->bytecodeOffset(); 262 | return mir_->toDiv()->bytecodeOffset(); 263 | } 264 | }; 265 | 266 | namespace details { 267 | 268 | // Base class for the int64 and non-int64 variants. 269 | template 270 | class LWasmUnalignedLoadBase : public details::LWasmLoadBase 271 | { 272 | public: 273 | typedef LWasmLoadBase Base; 274 | 275 | explicit LWasmUnalignedLoadBase(LNode::Opcode opcode, const LAllocation& ptr, 276 | const LDefinition& valueHelper) 277 | : Base(opcode, ptr, LAllocation()) 278 | { 279 | Base::setTemp(0, LDefinition::BogusTemp()); 280 | Base::setTemp(1, valueHelper); 281 | } 282 | 283 | const LAllocation* ptr() { 284 | return Base::getOperand(0); 285 | } 286 | const LDefinition* ptrCopy() { 287 | return Base::getTemp(0); 288 | } 289 | }; 290 | 291 | } // namespace details 292 | 293 | class LWasmUnalignedLoad : public details::LWasmUnalignedLoadBase<1> 294 | { 295 | public: 296 | LIR_HEADER(WasmUnalignedLoad); 297 | 298 | explicit LWasmUnalignedLoad(const LAllocation& ptr, const LDefinition& valueHelper) 299 | : LWasmUnalignedLoadBase(classOpcode, ptr, valueHelper) 300 | {} 301 | }; 302 | 303 | class LWasmUnalignedLoadI64 : public details::LWasmUnalignedLoadBase 304 | { 305 | public: 306 | LIR_HEADER(WasmUnalignedLoadI64); 307 | 308 | explicit LWasmUnalignedLoadI64(const LAllocation& ptr, const LDefinition& valueHelper) 309 | : LWasmUnalignedLoadBase(classOpcode, ptr, valueHelper) 310 | {} 311 | }; 312 | 313 | namespace details { 314 | 315 | // Base class for the int64 and non-int64 variants. 
316 | template 317 | class LWasmUnalignedStoreBase : public LInstructionHelper<0, NumOps, 2> 318 | { 319 | public: 320 | typedef LInstructionHelper<0, NumOps, 2> Base; 321 | 322 | static const size_t PtrIndex = 0; 323 | static const size_t ValueIndex = 1; 324 | 325 | LWasmUnalignedStoreBase(LNode::Opcode opcode, const LAllocation& ptr, 326 | const LDefinition& valueHelper) 327 | : Base(opcode) 328 | { 329 | Base::setOperand(0, ptr); 330 | Base::setTemp(0, LDefinition::BogusTemp()); 331 | Base::setTemp(1, valueHelper); 332 | } 333 | 334 | MWasmStore* mir() const { 335 | return Base::mir_->toWasmStore(); 336 | } 337 | const LAllocation* ptr() { 338 | return Base::getOperand(PtrIndex); 339 | } 340 | const LDefinition* ptrCopy() { 341 | return Base::getTemp(0); 342 | } 343 | }; 344 | 345 | } // namespace details 346 | 347 | class LWasmUnalignedStore : public details::LWasmUnalignedStoreBase<2> 348 | { 349 | public: 350 | LIR_HEADER(WasmUnalignedStore); 351 | 352 | LWasmUnalignedStore(const LAllocation& ptr, const LAllocation& value, 353 | const LDefinition& valueHelper) 354 | : LWasmUnalignedStoreBase(classOpcode, ptr, valueHelper) 355 | { 356 | setOperand(1, value); 357 | } 358 | 359 | const LAllocation* value() { 360 | return Base::getOperand(ValueIndex); 361 | } 362 | }; 363 | 364 | class LWasmUnalignedStoreI64 : public details::LWasmUnalignedStoreBase<1 + INT64_PIECES> 365 | { 366 | public: 367 | LIR_HEADER(WasmUnalignedStoreI64); 368 | LWasmUnalignedStoreI64(const LAllocation& ptr, const LInt64Allocation& value, 369 | const LDefinition& valueHelper) 370 | : LWasmUnalignedStoreBase(classOpcode, ptr, valueHelper) 371 | { 372 | setInt64Operand(1, value); 373 | } 374 | 375 | const LInt64Allocation value() { 376 | return getInt64Operand(ValueIndex); 377 | } 378 | }; 379 | 380 | class LWasmCompareExchangeI64 : public LInstructionHelper 381 | { 382 | public: 383 | LIR_HEADER(WasmCompareExchangeI64); 384 | 385 | LWasmCompareExchangeI64(const LAllocation& ptr, const 
LInt64Allocation& oldValue, 386 | const LInt64Allocation& newValue) 387 | : LInstructionHelper(classOpcode) 388 | { 389 | setOperand(0, ptr); 390 | setInt64Operand(1, oldValue); 391 | setInt64Operand(1 + INT64_PIECES, newValue); 392 | } 393 | 394 | const LAllocation* ptr() { 395 | return getOperand(0); 396 | } 397 | const LInt64Allocation oldValue() { 398 | return getInt64Operand(1); 399 | } 400 | const LInt64Allocation newValue() { 401 | return getInt64Operand(1 + INT64_PIECES); 402 | } 403 | const MWasmCompareExchangeHeap* mir() const { 404 | return mir_->toWasmCompareExchangeHeap(); 405 | } 406 | }; 407 | 408 | class LWasmAtomicExchangeI64 : public LInstructionHelper 409 | { 410 | public: 411 | LIR_HEADER(WasmAtomicExchangeI64); 412 | 413 | LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value) 414 | : LInstructionHelper(classOpcode) 415 | { 416 | setOperand(0, ptr); 417 | setInt64Operand(1, value); 418 | } 419 | 420 | const LAllocation* ptr() { 421 | return getOperand(0); 422 | } 423 | const LInt64Allocation value() { 424 | return getInt64Operand(1); 425 | } 426 | const MWasmAtomicExchangeHeap* mir() const { 427 | return mir_->toWasmAtomicExchangeHeap(); 428 | } 429 | }; 430 | 431 | class LWasmAtomicBinopI64 : public LInstructionHelper 432 | { 433 | public: 434 | LIR_HEADER(WasmAtomicBinopI64); 435 | 436 | LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value) 437 | : LInstructionHelper(classOpcode) 438 | { 439 | setOperand(0, ptr); 440 | setInt64Operand(1, value); 441 | } 442 | 443 | const LAllocation* ptr() { 444 | return getOperand(0); 445 | } 446 | const LInt64Allocation value() { 447 | return getInt64Operand(1); 448 | } 449 | const MWasmAtomicBinopHeap* mir() const { 450 | return mir_->toWasmAtomicBinopHeap(); 451 | } 452 | }; 453 | 454 | 455 | } // namespace jit 456 | } // namespace js 457 | 458 | #endif /* jit_mips_shared_LIR_mips_shared_h */ 459 | 
-------------------------------------------------------------------------------- /LIR-mips64.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_mips64_LIR_mips64_h 8 | #define jit_mips64_LIR_mips64_h 9 | 10 | namespace js { 11 | namespace jit { 12 | 13 | class LUnbox : public LInstructionHelper<1, 1, 0> 14 | { 15 | protected: 16 | LUnbox(LNode::Opcode opcode, const LAllocation& input) 17 | : LInstructionHelper(opcode) 18 | { 19 | setOperand(0, input); 20 | } 21 | 22 | public: 23 | LIR_HEADER(Unbox); 24 | 25 | explicit LUnbox(const LAllocation& input) 26 | : LInstructionHelper(classOpcode) 27 | { 28 | setOperand(0, input); 29 | } 30 | 31 | static const size_t Input = 0; 32 | 33 | MUnbox* mir() const { 34 | return mir_->toUnbox(); 35 | } 36 | const char* extraName() const { 37 | return StringFromMIRType(mir()->type()); 38 | } 39 | }; 40 | 41 | class LUnboxFloatingPoint : public LUnbox 42 | { 43 | MIRType type_; 44 | 45 | public: 46 | LIR_HEADER(UnboxFloatingPoint); 47 | 48 | LUnboxFloatingPoint(const LAllocation& input, MIRType type) 49 | : LUnbox(classOpcode, input), 50 | type_(type) 51 | { } 52 | 53 | MIRType type() const { 54 | return type_; 55 | } 56 | }; 57 | 58 | class LDivOrModI64 : public LBinaryMath<1> 59 | { 60 | public: 61 | LIR_HEADER(DivOrModI64) 62 | 63 | LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) 64 | : LBinaryMath(classOpcode) 65 | { 66 | setOperand(0, lhs); 67 | setOperand(1, rhs); 68 | setTemp(0, temp); 69 | } 70 | 71 | const LDefinition* remainder() { 72 | return getTemp(0); 73 | } 74 | MBinaryArithInstruction* mir() const { 
75 | MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); 76 | return static_cast(mir_); 77 | } 78 | 79 | bool canBeDivideByZero() const { 80 | if (mir_->isMod()) 81 | return mir_->toMod()->canBeDivideByZero(); 82 | return mir_->toDiv()->canBeDivideByZero(); 83 | } 84 | bool canBeNegativeOverflow() const { 85 | if (mir_->isMod()) 86 | return mir_->toMod()->canBeNegativeDividend(); 87 | return mir_->toDiv()->canBeNegativeOverflow(); 88 | } 89 | wasm::BytecodeOffset bytecodeOffset() const { 90 | MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); 91 | if (mir_->isMod()) 92 | return mir_->toMod()->bytecodeOffset(); 93 | return mir_->toDiv()->bytecodeOffset(); 94 | } 95 | }; 96 | 97 | class LUDivOrModI64 : public LBinaryMath<1> 98 | { 99 | public: 100 | LIR_HEADER(UDivOrModI64); 101 | 102 | LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) 103 | : LBinaryMath(classOpcode) 104 | { 105 | setOperand(0, lhs); 106 | setOperand(1, rhs); 107 | setTemp(0, temp); 108 | } 109 | 110 | const LDefinition* remainder() { 111 | return getTemp(0); 112 | } 113 | const char* extraName() const { 114 | return mir()->isTruncated() ? 
"Truncated" : nullptr; 115 | } 116 | 117 | MBinaryArithInstruction* mir() const { 118 | MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); 119 | return static_cast(mir_); 120 | } 121 | bool canBeDivideByZero() const { 122 | if (mir_->isMod()) 123 | return mir_->toMod()->canBeDivideByZero(); 124 | return mir_->toDiv()->canBeDivideByZero(); 125 | } 126 | wasm::BytecodeOffset bytecodeOffset() const { 127 | MOZ_ASSERT(mir_->isDiv() || mir_->isMod()); 128 | if (mir_->isMod()) 129 | return mir_->toMod()->bytecodeOffset(); 130 | return mir_->toDiv()->bytecodeOffset(); 131 | } 132 | }; 133 | 134 | class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> 135 | { 136 | public: 137 | LIR_HEADER(WasmTruncateToInt64); 138 | 139 | explicit LWasmTruncateToInt64(const LAllocation& in) 140 | : LInstructionHelper(classOpcode) 141 | { 142 | setOperand(0, in); 143 | } 144 | 145 | MWasmTruncateToInt64* mir() const { 146 | return mir_->toWasmTruncateToInt64(); 147 | } 148 | }; 149 | 150 | class LInt64ToFloatingPoint : public LInstructionHelper<1, 1, 0> 151 | { 152 | public: 153 | LIR_HEADER(Int64ToFloatingPoint); 154 | 155 | explicit LInt64ToFloatingPoint(const LInt64Allocation& in) 156 | : LInstructionHelper(classOpcode) 157 | { 158 | setInt64Operand(0, in); 159 | } 160 | 161 | MInt64ToFloatingPoint* mir() const { 162 | return mir_->toInt64ToFloatingPoint(); 163 | } 164 | }; 165 | 166 | } // namespace jit 167 | } // namespace js 168 | 169 | #endif /* jit_mips64_LIR_mips64_h */ 170 | -------------------------------------------------------------------------------- /LIR-ppc64le.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/

#ifndef jit_ppc64le_LIR_ppc64le_h
#define jit_ppc64le_LIR_ppc64le_h

namespace js {
namespace jit {

// Unbox a Value into a typed register.
class LUnbox : public LInstructionHelper<1, 1, 0>
{
  protected:
    LUnbox(LNode::Opcode opcode, const LAllocation& input)
      : LInstructionHelper(opcode)
    {
        setOperand(0, input);
    }

  public:
    LIR_HEADER(Unbox);

    explicit LUnbox(const LAllocation& input)
      : LInstructionHelper(classOpcode)
    {
        setOperand(0, input);
    }

    static const size_t Input = 0;

    MUnbox* mir() const {
        return mir_->toUnbox();
    }
    const char* extraName() const {
        return StringFromMIRType(mir()->type());
    }
};

class LUnboxFloatingPoint : public LUnbox
{
    MIRType type_;

  public:
    LIR_HEADER(UnboxFloatingPoint);

    LUnboxFloatingPoint(const LAllocation& input, MIRType type)
      : LUnbox(classOpcode, input),
        type_(type)
    { }

    MIRType type() const {
        return type_;
    }
};

// Signed 64-bit division or modulus; the single temp holds the remainder.
class LDivOrModI64 : public LBinaryMath<1>
{
  public:
    LIR_HEADER(DivOrModI64)

    LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
      : LBinaryMath(classOpcode)
    {
        setOperand(0, lhs);
        setOperand(1, rhs);
        setTemp(0, temp);
    }

    const LDefinition* remainder() {
        return getTemp(0);
    }
    MBinaryArithInstruction* mir() const {
        MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
        // NOTE(review): cast target was stripped from this dump; restored
        // from the assert above.
        return static_cast<MBinaryArithInstruction*>(mir_);
    }

    bool canBeDivideByZero() const {
        if (mir_->isMod())
            return mir_->toMod()->canBeDivideByZero();
        return mir_->toDiv()->canBeDivideByZero();
    }
    bool canBeNegativeOverflow() const {
        if (mir_->isMod())
            return mir_->toMod()->canBeNegativeDividend();
        return mir_->toDiv()->canBeNegativeOverflow();
    }
    wasm::BytecodeOffset bytecodeOffset() const {
        MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
        if (mir_->isMod())
            return mir_->toMod()->bytecodeOffset();
        return mir_->toDiv()->bytecodeOffset();
    }
};

// Unsigned 64-bit division or modulus.
class LUDivOrModI64 : public LBinaryMath<1>
{
  public:
    LIR_HEADER(UDivOrModI64);

    LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
      : LBinaryMath(classOpcode)
    {
        setOperand(0, lhs);
        setOperand(1, rhs);
        setTemp(0, temp);
    }

    const LDefinition* remainder() {
        return getTemp(0);
    }
    const char* extraName() const {
        return mir()->isTruncated() ? "Truncated" : nullptr;
    }

    MBinaryArithInstruction* mir() const {
        MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
        return static_cast<MBinaryArithInstruction*>(mir_);
    }
    bool canBeDivideByZero() const {
        if (mir_->isMod())
            return mir_->toMod()->canBeDivideByZero();
        return mir_->toDiv()->canBeDivideByZero();
    }
    wasm::BytecodeOffset bytecodeOffset() const {
        MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
        if (mir_->isMod())
            return mir_->toMod()->bytecodeOffset();
        return mir_->toDiv()->bytecodeOffset();
    }
};

class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0>
{
  public:
    LIR_HEADER(WasmTruncateToInt64);

    explicit LWasmTruncateToInt64(const LAllocation& in)
      : LInstructionHelper(classOpcode)
    {
        setOperand(0, in);
    }

    MWasmTruncateToInt64* mir() const {
        return mir_->toWasmTruncateToInt64();
    }
};

class LInt64ToFloatingPoint : public LInstructionHelper<1, 1, 0>
{
  public:
    LIR_HEADER(Int64ToFloatingPoint);

    explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
      : LInstructionHelper(classOpcode)
    {
        setInt64Operand(0, in);
    }

    MInt64ToFloatingPoint* mir() const {
        return mir_->toInt64ToFloatingPoint();
    }
};

// Convert a 32-bit unsigned integer to a double.
class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0>
{
  public:
    LIR_HEADER(WasmUint32ToDouble)

    LWasmUint32ToDouble(const LAllocation& input)
      : LInstructionHelper(classOpcode)
    {
        setOperand(0, input);
    }
};

// Convert a 32-bit unsigned integer to a float32.
class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0>
{
  public:
    LIR_HEADER(WasmUint32ToFloat32)

    LWasmUint32ToFloat32(const LAllocation& input)
      : LInstructionHelper(classOpcode)
    {
        setOperand(0, input);
    }
};

// Signed 32-bit division.
class LDivI : public LBinaryMath<1>
{
  public:
    LIR_HEADER(DivI);

    LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
      : LBinaryMath(classOpcode)
    {
        setOperand(0, lhs);
        setOperand(1, rhs);
        setTemp(0, temp);
    }

    MDiv* mir() const {
        return mir_->toDiv();
    }
};

// Division by a power of two, implemented as a shift.
class LDivPowTwoI : public LInstructionHelper<1, 1, 1>
{
    const int32_t shift_;

  public:
    LIR_HEADER(DivPowTwoI)

    LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp)
      : LInstructionHelper(classOpcode),
        shift_(shift)
    {
        setOperand(0, lhs);
        setTemp(0, temp);
    }

    const LAllocation* numerator() {
        return getOperand(0);
    }
    int32_t shift() const {
        return shift_;
    }
    MDiv* mir() const {
        return mir_->toDiv();
    }
};

// Signed 32-bit modulus.
class LModI : public LBinaryMath<1>
{
  public:
    LIR_HEADER(ModI);

    LModI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& callTemp)
      : LBinaryMath(classOpcode)
    {
        setOperand(0, lhs);
        setOperand(1, rhs);
        setTemp(0, callTemp);
    }

    const LDefinition* callTemp() {
        return getTemp(0);
    }
    MMod* mir() const {
        return mir_->toMod();
    }
};

// Modulus by a power of two, implemented with a mask.
class LModPowTwoI : public LInstructionHelper<1, 1, 0>
{
    const int32_t shift_;

  public:
    LIR_HEADER(ModPowTwoI);

    LModPowTwoI(const LAllocation& lhs, int32_t shift)
      : LInstructionHelper(classOpcode),
        shift_(shift)
    {
        setOperand(0, lhs);
    }

    int32_t shift() const {
        return shift_;
    }
    MMod* mir() const {
        return mir_->toMod();
    }
};

// Modulus via mask-and-correct; needs two integer temps.
class LModMaskI : public LInstructionHelper<1, 1, 2>
{
    const int32_t shift_;

  public:
    LIR_HEADER(ModMaskI);

    LModMaskI(const LAllocation& lhs, const LDefinition& temp0, const LDefinition& temp1,
              int32_t shift)
      : LInstructionHelper(classOpcode),
        shift_(shift)
    {
        setOperand(0, lhs);
        setTemp(0, temp0);
        setTemp(1, temp1);
    }

    int32_t shift() const {
        return shift_;
    }
    MMod* mir() const {
        return mir_->toMod();
    }
};

// Takes a tableswitch with an integer to decide
class LTableSwitch : public LInstructionHelper<0, 1, 2>
{
  public:
    LIR_HEADER(TableSwitch);

    LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
                 const LDefinition& jumpTablePointer, MTableSwitch* ins)
      : LInstructionHelper(classOpcode)
    {
        setOperand(0, in);
        setTemp(0, inputCopy);
        setTemp(1, jumpTablePointer);
        setMir(ins);
    }

    MTableSwitch* mir() const {
        return mir_->toTableSwitch();
    }
    const LAllocation* index() {
        return getOperand(0);
    }
    const LDefinition* tempInt() {
        return getTemp(0);
    }
    // This is added to share the same CodeGenerator prefixes.
    const LDefinition* tempPointer() {
        return getTemp(1);
    }
};

// Takes a tableswitch with an integer to decide
class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3>
{
  public:
    LIR_HEADER(TableSwitchV);

    LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
                  const LDefinition& floatCopy, const LDefinition& jumpTablePointer,
                  MTableSwitch* ins)
      : LInstructionHelper(classOpcode)
    {
        setBoxOperand(InputValue, input);
        setTemp(0, inputCopy);
        setTemp(1, floatCopy);
        setTemp(2, jumpTablePointer);
        setMir(ins);
    }

    MTableSwitch* mir() const {
        return mir_->toTableSwitch();
    }

    static const size_t InputValue = 0;

    const LDefinition* tempInt() {
        return getTemp(0);
    }
    const LDefinition* tempFloat() {
        return getTemp(1);
    }
    const LDefinition* tempPointer() {
        return getTemp(2);
    }
};

class LMulI : public LBinaryMath<0>
{
  public:
    LIR_HEADER(MulI);

    LMulI()
      : LBinaryMath(classOpcode)
    {}

    MMul* mir() {
        return mir_->toMul();
    }
};

// Unsigned 32-bit division or modulus; the MIR node determines which result
// is used.
class LUDivOrMod : public LBinaryMath<0>
{
  public:
    LIR_HEADER(UDivOrMod);

    LUDivOrMod()
      : LBinaryMath(classOpcode)
    {}

    MBinaryArithInstruction* mir() const {
        MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
        return static_cast<MBinaryArithInstruction*>(mir_);
    }

    bool canBeDivideByZero() const {
        if (mir_->isMod())
            return mir_->toMod()->canBeDivideByZero();
        return mir_->toDiv()->canBeDivideByZero();
    }

    bool trapOnError() const {
        if (mir_->isMod())
            return mir_->toMod()->trapOnError();
        return mir_->toDiv()->trapOnError();
    }

    wasm::BytecodeOffset bytecodeOffset() const {
        MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
        if (mir_->isMod())
            return mir_->toMod()->bytecodeOffset();
        return mir_->toDiv()->bytecodeOffset();
    }
};

namespace details {

// Base class for the int64 and non-int64 variants.
// NOTE(review): template parameter list and LWasmLoadBase arguments were
// stripped from this dump; <NumDefs, 2> reconstructed -- confirm upstream.
template<size_t NumDefs>
class LWasmUnalignedLoadBase : public details::LWasmLoadBase<NumDefs, 2>
{
  public:
    typedef LWasmLoadBase<NumDefs, 2> Base;

    explicit LWasmUnalignedLoadBase(LNode::Opcode opcode, const LAllocation& ptr,
                                    const LDefinition& valueHelper)
      : Base(opcode, ptr, LAllocation())
    {
        Base::setTemp(0, LDefinition::BogusTemp());
        Base::setTemp(1, valueHelper);
    }

    const LAllocation* ptr() {
        return Base::getOperand(0);
    }
    const LDefinition* ptrCopy() {
        return Base::getTemp(0);
    }
};

} // namespace details

class LWasmUnalignedLoad : public details::LWasmUnalignedLoadBase<1>
{
  public:
    LIR_HEADER(WasmUnalignedLoad);

    explicit LWasmUnalignedLoad(const LAllocation& ptr, const LDefinition& valueHelper)
      : LWasmUnalignedLoadBase(classOpcode, ptr, valueHelper)
    {}
};

class LWasmUnalignedLoadI64 : public details::LWasmUnalignedLoadBase<INT64_PIECES>
{
  public:
    LIR_HEADER(WasmUnalignedLoadI64);

    explicit LWasmUnalignedLoadI64(const LAllocation& ptr, const LDefinition& valueHelper)
      : LWasmUnalignedLoadBase(classOpcode, ptr, valueHelper)
    {}
};

namespace details {

// Base class for the int64 and non-int64 variants.
template<size_t NumOps>
class LWasmUnalignedStoreBase : public LInstructionHelper<0, NumOps, 2>
{
  public:
    typedef LInstructionHelper<0, NumOps, 2> Base;

    static const size_t PtrIndex = 0;
    static const size_t ValueIndex = 1;

    LWasmUnalignedStoreBase(LNode::Opcode opcode, const LAllocation& ptr,
                            const LDefinition& valueHelper)
      : Base(opcode)
    {
        Base::setOperand(0, ptr);
        Base::setTemp(0, LDefinition::BogusTemp());
        Base::setTemp(1, valueHelper);
    }

    MWasmStore* mir() const {
        return Base::mir_->toWasmStore();
    }
    const LAllocation* ptr() {
        return Base::getOperand(PtrIndex);
    }
    const LDefinition* ptrCopy() {
        return Base::getTemp(0);
    }
};

} // namespace details

class LWasmUnalignedStore : public details::LWasmUnalignedStoreBase<2>
{
  public:
    LIR_HEADER(WasmUnalignedStore);

    LWasmUnalignedStore(const LAllocation& ptr, const LAllocation& value,
                        const LDefinition& valueHelper)
      : LWasmUnalignedStoreBase(classOpcode, ptr, valueHelper)
    {
        setOperand(1, value);
    }

    const LAllocation* value() {
        return Base::getOperand(ValueIndex);
    }
};

class LWasmUnalignedStoreI64 : public details::LWasmUnalignedStoreBase<1 + INT64_PIECES>
{
  public:
    LIR_HEADER(WasmUnalignedStoreI64);

    LWasmUnalignedStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
                           const LDefinition& valueHelper)
      : LWasmUnalignedStoreBase(classOpcode, ptr, valueHelper)
    {
        setInt64Operand(1, value);
    }

    const LInt64Allocation value() {
        return getInt64Operand(ValueIndex);
    }
};

// 64-bit compare-and-exchange on the wasm heap.
// NOTE(review): LInstructionHelper arguments reconstructed from the operand
// layout visible in the constructor -- confirm upstream.
class LWasmCompareExchangeI64 : public LInstructionHelper<INT64_PIECES, 1 + 2 * INT64_PIECES, 0>
{
  public:
    LIR_HEADER(WasmCompareExchangeI64);

    LWasmCompareExchangeI64(const LAllocation& ptr, const LInt64Allocation& oldValue,
                            const LInt64Allocation& newValue)
      : LInstructionHelper(classOpcode)
    {
        setOperand(0, ptr);
        setInt64Operand(1, oldValue);
        setInt64Operand(1 + INT64_PIECES, newValue);
    }

    const LAllocation* ptr() {
        return getOperand(0);
    }
    const LInt64Allocation oldValue() {
        return getInt64Operand(1);
    }
    const LInt64Allocation newValue() {
        return getInt64Operand(1 + INT64_PIECES);
    }
    const MWasmCompareExchangeHeap* mir() const {
        return mir_->toWasmCompareExchangeHeap();
    }
};

// 64-bit atomic exchange on the wasm heap.
class LWasmAtomicExchangeI64 : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 0>
{
  public:
    LIR_HEADER(WasmAtomicExchangeI64);

    LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value)
      : LInstructionHelper(classOpcode)
    {
        setOperand(0, ptr);
        setInt64Operand(1, value);
    }

    const LAllocation* ptr() {
        return getOperand(0);
    }
    const LInt64Allocation value() {
        return getInt64Operand(1);
    }
    const MWasmAtomicExchangeHeap* mir() const {
        return mir_->toWasmAtomicExchangeHeap();
    }
};

// 64-bit atomic read-modify-write on the wasm heap.
class LWasmAtomicBinopI64 : public LInstructionHelper<INT64_PIECES, 1 + INT64_PIECES, 0>
{
  public:
    LIR_HEADER(WasmAtomicBinopI64);

    LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value)
      : LInstructionHelper(classOpcode)
    {
        setOperand(0, ptr);
        setInt64Operand(1, value);
    }

    const LAllocation* ptr() {
        return getOperand(0);
    }
    const LInt64Allocation value() {
        return getInt64Operand(1);
    }
    const MWasmAtomicBinopHeap* mir() const {
        return mir_->toWasmAtomicBinopHeap();
    }
};


} // namespace jit
} // namespace js

#endif /* jit_ppc64le_LIR_ppc64le_h */
-------------------------------------------------------------------------------- /Lowering-mips-shared.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_mips_shared_Lowering_mips_shared_h 8 | #define jit_mips_shared_Lowering_mips_shared_h 9 | 10 | #include "jit/shared/Lowering-shared.h" 11 | 12 | namespace js { 13 | namespace jit { 14 | 15 | class LIRGeneratorMIPSShared : public LIRGeneratorShared 16 | { 17 | protected: 18 | LIRGeneratorMIPSShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph) 19 | : LIRGeneratorShared(gen, graph, lirGraph) 20 | { } 21 | 22 | // x86 has constraints on what registers can be formatted for 1-byte 23 | // stores and loads; on MIPS all registers are okay. 
24 | LAllocation useByteOpRegister(MDefinition* mir); 25 | LAllocation useByteOpRegisterAtStart(MDefinition* mir); 26 | LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir); 27 | LDefinition tempByteOpRegister(); 28 | 29 | bool needTempForPostBarrier() { return false; } 30 | 31 | void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs, 32 | MDefinition* rhs); 33 | void lowerUrshD(MUrsh* mir); 34 | 35 | void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, 36 | MDefinition* input); 37 | void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, 38 | MDefinition* lhs, MDefinition* rhs); 39 | 40 | void lowerForALUInt64(LInstructionHelper* ins, 41 | MDefinition* mir, MDefinition* lhs, MDefinition* rhs); 42 | void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs); 43 | template 44 | void lowerForShiftInt64(LInstructionHelper* ins, 45 | MDefinition* mir, MDefinition* lhs, MDefinition* rhs); 46 | 47 | void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir, 48 | MDefinition* src); 49 | template 50 | void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir, 51 | MDefinition* lhs, MDefinition* rhs); 52 | 53 | void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir, 54 | MDefinition* lhs, MDefinition* rhs) 55 | { 56 | return lowerForFPU(ins, mir, lhs, rhs); 57 | } 58 | void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir, 59 | MDefinition* lhs, MDefinition* rhs) 60 | { 61 | return lowerForFPU(ins, mir, lhs, rhs); 62 | } 63 | 64 | void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir, 65 | MDefinition* lhs, MDefinition* rhs); 66 | void lowerDivI(MDiv* div); 67 | void lowerModI(MMod* mod); 68 | void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs); 69 | void lowerUDiv(MDiv* div); 70 | void lowerUMod(MMod* mod); 71 | 72 | LTableSwitch* newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy, 
73 | MTableSwitch* ins); 74 | LTableSwitchV* newLTableSwitchV(MTableSwitch* ins); 75 | 76 | void lowerPhi(MPhi* phi); 77 | }; 78 | 79 | } // namespace jit 80 | } // namespace js 81 | 82 | #endif /* jit_mips_shared_Lowering_mips_shared_h */ 83 | -------------------------------------------------------------------------------- /Lowering-mips64.cpp: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #include "jit/mips64/Lowering-mips64.h" 8 | 9 | #include "jit/Lowering.h" 10 | #include "jit/mips64/Assembler-mips64.h" 11 | #include "jit/MIR.h" 12 | 13 | #include "jit/shared/Lowering-shared-inl.h" 14 | 15 | using namespace js; 16 | using namespace js::jit; 17 | 18 | void 19 | LIRGeneratorMIPS64::defineInt64Phi(MPhi* phi, size_t lirIndex) 20 | { 21 | defineTypedPhi(phi, lirIndex); 22 | } 23 | 24 | void 25 | LIRGeneratorMIPS64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, 26 | LBlock* block, size_t lirIndex) 27 | { 28 | lowerTypedPhiInput(phi, inputPosition, block, lirIndex); 29 | } 30 | 31 | LBoxAllocation 32 | LIRGeneratorMIPS64::useBoxFixed(MDefinition* mir, Register reg1, Register reg2, bool useAtStart) 33 | { 34 | MOZ_ASSERT(mir->type() == MIRType::Value); 35 | 36 | ensureDefined(mir); 37 | return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart)); 38 | } 39 | 40 | void 41 | LIRGeneratorMIPS64::lowerDivI64(MDiv* div) 42 | { 43 | if (div->isUnsigned()) { 44 | lowerUDivI64(div); 45 | return; 46 | } 47 | 48 | LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), 49 | temp()); 50 | defineInt64(lir, div); 51 | } 52 | 53 | void 54 | 
LIRGeneratorMIPS64::lowerModI64(MMod* mod)
{
    // Signed 64-bit modulus; unsigned forms go through lowerUModI64.
    if (mod->isUnsigned()) {
        lowerUModI64(mod);
        return;
    }

    LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()),
                                                  temp());
    defineInt64(lir, mod);
}

// Unsigned 64-bit division: same LIR node as unsigned modulus; the
// code generator distinguishes them via the MIR node.
void
LIRGeneratorMIPS64::lowerUDivI64(MDiv* div)
{
    LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(div->lhs()),
                                                    useRegister(div->rhs()),
                                                    temp());
    defineInt64(lir, div);
}

// Unsigned 64-bit modulus.
void
LIRGeneratorMIPS64::lowerUModI64(MMod* mod)
{
    LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useRegister(mod->lhs()),
                                                    useRegister(mod->rhs()),
                                                    temp());
    defineInt64(lir, mod);
}

// Box a raw value into a JS Value. Constants are either re-emitted at
// each use site or lowered to an LValue; everything else uses LBox.
void
LIRGenerator::visitBox(MBox* box)
{
    MDefinition* opd = box->getOperand(0);

    // If the operand is a constant, emit near its uses.
    if (opd->isConstant() && box->canEmitAtUses()) {
        emitAtUses(box);
        return;
    }

    if (opd->isConstant()) {
        define(new(alloc()) LValue(opd->toConstant()->toJSValue()), box, LDefinition(LDefinition::BOX));
    } else {
        LBox* ins = new(alloc()) LBox(useRegister(opd), opd->type());
        define(ins, box, LDefinition(LDefinition::BOX));
    }
}

// Unbox a Value (or ObjectOrNull) to a raw payload. Fallible unboxes get a
// snapshot so a type mismatch can bail out to Baseline.
void
LIRGenerator::visitUnbox(MUnbox* unbox)
{
    MDefinition* box = unbox->getOperand(0);

    if (box->type() == MIRType::ObjectOrNull) {
        // ObjectOrNull is already a pointer-shaped payload; reuse its
        // register as the output.
        LUnboxObjectOrNull* lir = new(alloc()) LUnboxObjectOrNull(useRegisterAtStart(box));
        if (unbox->fallible())
            assignSnapshot(lir, unbox->bailoutKind());
        defineReuseInput(lir, unbox, 0);
        return;
    }

    MOZ_ASSERT(box->type() == MIRType::Value);

    LUnbox* lir;
    if (IsFloatingPointType(unbox->type())) {
        lir = new(alloc()) LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
    } else if (unbox->fallible()) {
        // If the unbox is fallible, load the Value in a register first to
        // avoid multiple loads.
        lir = new(alloc()) LUnbox(useRegisterAtStart(box));
    } else {
        lir = new(alloc()) LUnbox(useAtStart(box));
    }

    if (unbox->fallible())
        assignSnapshot(lir, unbox->bailoutKind());

    define(lir, unbox);
}

// Return a boxed Value in the ABI-mandated JSReturnReg.
void
LIRGenerator::visitReturn(MReturn* ret)
{
    MDefinition* opd = ret->getOperand(0);
    MOZ_ASSERT(opd->type() == MIRType::Value);

    LReturn* ins = new(alloc()) LReturn;
    ins->setOperand(0, useFixed(opd, JSReturnReg));
    add(ins);
}

// "Untyped" (boxed Value) phis also fit in one register on MIPS64.
void
LIRGeneratorMIPS64::defineUntypedPhi(MPhi* phi, size_t lirIndex)
{
    defineTypedPhi(phi, lirIndex);
}

void
LIRGeneratorMIPS64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
                                         LBlock* block, size_t lirIndex)
{
    lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
}

// Truncate a double to int32; needs a double-precision FP temp.
void
LIRGeneratorMIPS64::lowerTruncateDToInt32(MTruncateToInt32* ins)
{
    MDefinition* opd = ins->input();
    MOZ_ASSERT(opd->type() == MIRType::Double);

    define(new(alloc())
           LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
}

// Truncate a float32 to int32; needs a single-precision FP temp.
void
LIRGeneratorMIPS64::lowerTruncateFToInt32(MTruncateToInt32* ins)
{
    MDefinition* opd = ins->input();
    MOZ_ASSERT(opd->type() == MIRType::Float32);

    define(new(alloc())
           LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
}

// Math.random(): three integer temps for the PRNG state, result fixed in
// the double return register.
void
LIRGenerator::visitRandom(MRandom* ins)
{
    LRandom *lir = new(alloc()) LRandom(temp(), temp(), temp());
    defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
}

// Wasm float -> int64 truncation.
void
LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins)
{
    MDefinition* opd = ins->input();
    MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);

    defineInt64(new(alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
}

// Wasm int64 -> float/double conversion.
void
LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins)
{
    MDefinition* opd = ins->input();
    MOZ_ASSERT(opd->type() == MIRType::Int64);
    MOZ_ASSERT(IsFloatingPointType(ins->type()));

    define(new(alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
}

// --------------------------------------------------------------------------
// Lowering-mips64.h
// --------------------------------------------------------------------------

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_mips64_Lowering_mips64_h
#define jit_mips64_Lowering_mips64_h

#include "jit/mips-shared/Lowering-mips-shared.h"

namespace js {
namespace jit {

// MIPS64-specific LIR generation; 64-bit-only details layered on top of the
// shared MIPS lowering code.
class LIRGeneratorMIPS64 : public LIRGeneratorMIPSShared
{
  protected:
    LIRGeneratorMIPS64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
      : LIRGeneratorMIPSShared(gen, graph, lirGraph)
    { }

    void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t);
    void defineInt64Phi(MPhi*, size_t);

    // Returns a box allocation. reg2 is ignored on 64-bit platforms.
    LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
                               bool useAtStart = false);

    // A Value fits in one 64-bit register, so unboxing needs no extra temp.
    inline LDefinition tempToUnbox() {
        return temp();
    }

    void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
    void defineUntypedPhi(MPhi* phi, size_t lirIndex);

    void lowerTruncateDToInt32(MTruncateToInt32* ins);
    void lowerTruncateFToInt32(MTruncateToInt32* ins);

    void lowerDivI64(MDiv* div);
    void lowerModI64(MMod* mod);
    void lowerUDivI64(MDiv* div);
    void lowerUModI64(MMod* mod);
};

typedef LIRGeneratorMIPS64 LIRGeneratorSpecific;

} // namespace jit
} // namespace js

#endif /* jit_mips64_Lowering_mips64_h */

// --------------------------------------------------------------------------
// Lowering-ppc64le.h
// --------------------------------------------------------------------------

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_ppc64le_Lowering_ppc64le_h
#define jit_ppc64le_Lowering_ppc64le_h

#include "jit/shared/Lowering-shared.h"

namespace js {
namespace jit {

// POWER9 little-endian LIR generation. Unlike the MIPS64 class above, this
// derives directly from LIRGeneratorShared (there is no shared PPC layer).
class LIRGeneratorPPC64LE : public LIRGeneratorShared
{
  protected:
    LIRGeneratorPPC64LE(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
      : LIRGeneratorShared(gen, graph, lirGraph)
    { }

    // x86 has constraints on what registers can be formatted for 1-byte
    // stores and loads, but on Power all GPRs are okay.
    LAllocation useByteOpRegister(MDefinition* mir);
    LAllocation useByteOpRegisterAtStart(MDefinition* mir);
    LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
    LDefinition tempByteOpRegister();

    bool needTempForPostBarrier() { return false; }

    void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
                       MDefinition* rhs);
    void lowerUrshD(MUrsh* mir);

    void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
                     MDefinition* input);
    void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
                     MDefinition* lhs, MDefinition* rhs);

    // NOTE(review): the template arguments on the next three declarations
    // were eaten by text extraction; reconstructed as the standard
    // INT64_PIECES-based shapes used by other 64-bit ports — confirm
    // against the upstream shared lowering headers.
    void lowerForALUInt64(LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
                          MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
    void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs, MDefinition* rhs);
    template<size_t Temps>
    void lowerForShiftInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
                            MDefinition* mir, MDefinition* lhs, MDefinition* rhs);

    void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
                     MDefinition* src);
    template<size_t Temps>
    void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
                     MDefinition* lhs, MDefinition* rhs);

    // SIMD comparisons reuse the FPU lowering path.
    void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
                         MDefinition* lhs, MDefinition* rhs)
    {
        return lowerForFPU(ins, mir, lhs, rhs);
    }
    void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
                         MDefinition* lhs, MDefinition* rhs)
    {
        return lowerForFPU(ins, mir, lhs, rhs);
    }

    void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
                                 MDefinition* lhs, MDefinition* rhs);
    void lowerDivI(MDiv* div);
    void lowerModI(MMod* mod);
    void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
    void lowerUDiv(MDiv* div);
    void lowerUMod(MMod* mod);

    LTableSwitch* newLTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
                                  MTableSwitch* ins);
    LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);

    void lowerPhi(MPhi* phi);
    void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t);
    void defineInt64Phi(MPhi*, size_t);

    // Returns a box allocation. reg2 is ignored since we're 64-bit.
    LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
                               bool useAtStart = false);

    inline LDefinition tempToUnbox() {
        return temp();
    }

    void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
    void defineUntypedPhi(MPhi* phi, size_t lirIndex);

    void lowerTruncateDToInt32(MTruncateToInt32* ins);
    void lowerTruncateFToInt32(MTruncateToInt32* ins);

    void lowerDivI64(MDiv* div);
    void lowerModI64(MMod* mod);
    void lowerUDivI64(MDiv* div);
    void lowerUModI64(MMod* mod);
};

typedef LIRGeneratorPPC64LE LIRGeneratorSpecific;

} // namespace jit
} // namespace js

#endif /* jit_ppc64le_Lowering_ppc64le_h */

// --------------------------------------------------------------------------
// MacroAssembler-mips-shared.h
// --------------------------------------------------------------------------

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

#ifndef jit_mips_shared_MacroAssembler_mips_shared_h
#define jit_mips_shared_MacroAssembler_mips_shared_h

#if defined(JS_CODEGEN_MIPS32)
# include "jit/mips32/Assembler-mips32.h"
#elif defined(JS_CODEGEN_MIPS64)
# include "jit/mips64/Assembler-mips64.h"
#endif

#include "jit/AtomicOp.h"

namespace js {
namespace jit {

// Operand width for the generic ma_load/ma_store helpers, in bits.
enum LoadStoreSize
{
    SizeByte = 8,
    SizeHalfWord = 16,
    SizeWord = 32,
    SizeDouble = 64
};

// How sub-word loads widen the result register.
enum LoadStoreExtension
{
    ZeroExtend = 0,
    SignExtend = 1
};

// Whether a branch may use a short (limited-range) encoding or must use the
// long form.
enum JumpKind
{
    LongJump = 0,
    ShortJump = 1
};

enum DelaySlotFill
{
    DontFillDelaySlot = 0,
    FillDelaySlot = 1
};

// MIPS ABIs expect the address of an indirectly-called function in $t9.
static Register CallReg = t9;

// Assembler helpers shared between the MIPS32 and MIPS64 ports. The ma_*
// methods are macro-instructions: they may expand to several machine
// instructions and may clobber the scratch registers.
class MacroAssemblerMIPSShared : public Assembler
{
  protected:
    // Perform a downcast. Should be removed by Bug 996602.
    MacroAssembler& asMasm();
    const MacroAssembler& asMasm() const;

    Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
    Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);

    void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
                              DoubleCondition c, FloatTestKind* testKind,
                              FPConditionBit fcc = FCC0);

  public:
    void ma_move(Register rd, Register rs);

    void ma_li(Register dest, ImmGCPtr ptr);

    void ma_li(Register dest, Imm32 imm);
    void ma_liPatchable(Register dest, Imm32 imm);

    // Shift operations
    void ma_sll(Register rd, Register rt, Imm32 shift);
    void ma_srl(Register rd, Register rt, Imm32 shift);
    void ma_sra(Register rd, Register rt, Imm32 shift);
    void ma_ror(Register rd, Register rt, Imm32 shift);
    void ma_rol(Register rd, Register rt, Imm32 shift);

    void ma_sll(Register rd, Register rt, Register shift);
    void ma_srl(Register rd, Register rt, Register shift);
    void ma_sra(Register rd, Register rt, Register shift);
    void ma_ror(Register rd, Register rt, Register shift);
    void ma_rol(Register rd, Register rt, Register shift);

    // Negate
    void ma_negu(Register rd, Register rs);

    void ma_not(Register rd, Register rs);

    // Bit extract/insert
    void ma_ext(Register rt, Register rs, uint16_t pos, uint16_t size);
    void ma_ins(Register rt, Register rs, uint16_t pos, uint16_t size);

    // Sign extend
    void ma_seb(Register rd, Register rt);
    void ma_seh(Register rd, Register rt);

    // and
    void ma_and(Register rd, Register rs);
    void ma_and(Register rd, Imm32 imm);
    void ma_and(Register rd, Register rs, Imm32 imm);

    // or
    void ma_or(Register rd, Register rs);
    void ma_or(Register rd, Imm32 imm);
    void ma_or(Register rd, Register rs, Imm32 imm);

    // xor
    void ma_xor(Register rd, Register rs);
    void ma_xor(Register rd, Imm32 imm);
    void ma_xor(Register rd, Register rs, Imm32 imm);

    // Count trailing zeroes.
    void ma_ctz(Register rd, Register rs);

    // load
    void ma_load(Register dest, const BaseIndex& src, LoadStoreSize size = SizeWord,
                 LoadStoreExtension extension = SignExtend);
    void ma_load_unaligned(const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src, Register temp,
                           LoadStoreSize size, LoadStoreExtension extension);

    // store
    void ma_store(Register data, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                  LoadStoreExtension extension = SignExtend);
    void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                  LoadStoreExtension extension = SignExtend);
    void ma_store_unaligned(const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest, Register temp,
                            LoadStoreSize size, LoadStoreExtension extension);

    // arithmetic based ops
    // add
    void ma_addu(Register rd, Register rs, Imm32 imm);
    void ma_addu(Register rd, Register rs);
    void ma_addu(Register rd, Imm32 imm);
    void ma_addTestCarry(Condition cond, Register rd, Register rs, Register rt, Label* overflow);
    void ma_addTestCarry(Condition cond, Register rd, Register rs, Imm32 imm, Label* overflow);

    // subtract
    void ma_subu(Register rd, Register rs, Imm32 imm);
    void ma_subu(Register rd, Register rs);
    void ma_subu(Register rd, Imm32 imm);
    void ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow);

    // multiplies. For now, there are only few that we care about.
    void ma_mul(Register rd, Register rs, Imm32 imm);
    void ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label* overflow);
    void ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow);

    // divisions
    void ma_div_branch_overflow(Register rd, Register rs, Register rt, Label* overflow);
    void ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow);

    // fast mod, uses scratch registers, and thus needs to be in the assembler
    // implicitly assumes that we can overwrite dest at the beginning of the sequence
    void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
                     int32_t shift, Label* negZero = nullptr);

    // branches when done from within mips-specific code
    void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = LongJump);
    void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
    void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump);
    void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = LongJump) {
        // Materialize the GC pointer into the scratch register, then
        // compare; lhs must therefore not itself be the scratch register.
        MOZ_ASSERT(lhs != ScratchRegister);
        ma_li(ScratchRegister, imm);
        ma_b(lhs, ScratchRegister, l, c, jumpKind);
    }

    void ma_b(Label* l, JumpKind jumpKind = LongJump);

    // fp instructions
    void ma_lis(FloatRegister dest, float value);

    void ma_sd(FloatRegister src, BaseIndex address);
    void ma_ss(FloatRegister src, BaseIndex address);

    void ma_ld(FloatRegister dest, const BaseIndex& src);
    void ma_ls(FloatRegister dest, const BaseIndex& src);

    // FP branches
    void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
                 JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
    void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
                 JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);

    void ma_call(ImmPtr dest);

    void ma_jump(ImmPtr dest);

    // Set dst to 0/1 according to the comparison result.
    void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
    void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
    void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
    void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);

    // GPR <-> FPR transfers via mtc1/mfc1 (low 32 bits of the FP register).
    void moveToDoubleLo(Register src, FloatRegister dest) {
        as_mtc1(src, dest);
    }
    void moveFromDoubleLo(FloatRegister src, Register dest) {
        as_mfc1(dest, src);
    }

    void moveToFloat32(Register src, FloatRegister dest) {
        as_mtc1(src, dest);
    }
    void moveFromFloat32(FloatRegister src, Register dest) {
        as_mfc1(dest, src);
    }

    // Evaluate srcDest = minmax{Float32,Double}(srcDest, other).
    // Handle NaN specially if handleNaN is true.
    void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
    void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);

    void loadDouble(const Address& addr, FloatRegister dest);
    void loadDouble(const BaseIndex& src, FloatRegister dest);

    // Load a float value into a register, then expand it to a double.
    void loadFloatAsDouble(const Address& addr, FloatRegister dest);
    void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);

    void loadFloat32(const Address& addr, FloatRegister dest);
    void loadFloat32(const BaseIndex& src, FloatRegister dest);

    // Out-of-line paths for wasm float->int truncations that need a trap or
    // saturation check.
    void outOfLineWasmTruncateToInt32Check(FloatRegister input, Register output, MIRType fromType,
                                           TruncFlags flags, Label* rejoin,
                                           wasm::BytecodeOffset trapOffset);
    void outOfLineWasmTruncateToInt64Check(FloatRegister input, Register64 output, MIRType fromType,
                                           TruncFlags flags, Label* rejoin,
                                           wasm::BytecodeOffset trapOffset);

  protected:
    void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
                      Register ptrScratch, AnyRegister output, Register tmp);
    void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value, Register memoryBase,
                       Register ptr, Register ptrScratch, Register tmp);
};

} // namespace jit
} // namespace js

#endif /* jit_mips_shared_MacroAssembler_mips_shared_h */

// --------------------------------------------------------------------------
// MoveEmitter-mips-shared.cpp
// --------------------------------------------------------------------------

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0.
If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #include "jit/mips-shared/MoveEmitter-mips-shared.h" 8 | 9 | #include "jit/MacroAssembler-inl.h" 10 | 11 | using namespace js; 12 | using namespace js::jit; 13 | 14 | void 15 | MoveEmitterMIPSShared::emit(const MoveResolver& moves) 16 | { 17 | if (moves.numCycles()) { 18 | // Reserve stack for cycle resolution 19 | masm.reserveStack(moves.numCycles() * sizeof(double)); 20 | pushedAtCycle_ = masm.framePushed(); 21 | } 22 | 23 | for (size_t i = 0; i < moves.numMoves(); i++) 24 | emit(moves.getMove(i)); 25 | } 26 | 27 | Address 28 | MoveEmitterMIPSShared::cycleSlot(uint32_t slot, uint32_t subslot) const 29 | { 30 | int32_t offset = masm.framePushed() - pushedAtCycle_; 31 | MOZ_ASSERT(Imm16::IsInSignedRange(offset)); 32 | return Address(StackPointer, offset + slot * sizeof(double) + subslot); 33 | } 34 | 35 | int32_t 36 | MoveEmitterMIPSShared::getAdjustedOffset(const MoveOperand& operand) 37 | { 38 | MOZ_ASSERT(operand.isMemoryOrEffectiveAddress()); 39 | if (operand.base() != StackPointer) 40 | return operand.disp(); 41 | 42 | // Adjust offset if stack pointer has been moved. 43 | return operand.disp() + masm.framePushed() - pushedAtStart_; 44 | } 45 | 46 | Address 47 | MoveEmitterMIPSShared::getAdjustedAddress(const MoveOperand& operand) 48 | { 49 | return Address(operand.base(), getAdjustedOffset(operand)); 50 | } 51 | 52 | 53 | Register 54 | MoveEmitterMIPSShared::tempReg() 55 | { 56 | spilledReg_ = SecondScratchReg; 57 | return SecondScratchReg; 58 | } 59 | 60 | void 61 | MoveEmitterMIPSShared::emitMove(const MoveOperand& from, const MoveOperand& to) 62 | { 63 | if (from.isGeneralReg()) { 64 | // Second scratch register should not be moved by MoveEmitter. 
65 | MOZ_ASSERT(from.reg() != spilledReg_); 66 | 67 | if (to.isGeneralReg()) 68 | masm.movePtr(from.reg(), to.reg()); 69 | else if (to.isMemory()) 70 | masm.storePtr(from.reg(), getAdjustedAddress(to)); 71 | else 72 | MOZ_CRASH("Invalid emitMove arguments."); 73 | } else if (from.isMemory()) { 74 | if (to.isGeneralReg()) { 75 | masm.loadPtr(getAdjustedAddress(from), to.reg()); 76 | } else if (to.isMemory()) { 77 | masm.loadPtr(getAdjustedAddress(from), tempReg()); 78 | masm.storePtr(tempReg(), getAdjustedAddress(to)); 79 | } else { 80 | MOZ_CRASH("Invalid emitMove arguments."); 81 | } 82 | } else if (from.isEffectiveAddress()) { 83 | if (to.isGeneralReg()) { 84 | masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg()); 85 | } else if (to.isMemory()) { 86 | masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg()); 87 | masm.storePtr(tempReg(), getAdjustedAddress(to)); 88 | } else { 89 | MOZ_CRASH("Invalid emitMove arguments."); 90 | } 91 | } else { 92 | MOZ_CRASH("Invalid emitMove arguments."); 93 | } 94 | } 95 | 96 | void 97 | MoveEmitterMIPSShared::emitInt32Move(const MoveOperand &from, const MoveOperand &to) 98 | { 99 | if (from.isGeneralReg()) { 100 | // Second scratch register should not be moved by MoveEmitter. 
101 | MOZ_ASSERT(from.reg() != spilledReg_); 102 | 103 | if (to.isGeneralReg()) 104 | masm.move32(from.reg(), to.reg()); 105 | else if (to.isMemory()) 106 | masm.store32(from.reg(), getAdjustedAddress(to)); 107 | else 108 | MOZ_CRASH("Invalid emitInt32Move arguments."); 109 | } else if (from.isMemory()) { 110 | if (to.isGeneralReg()) { 111 | masm.load32(getAdjustedAddress(from), to.reg()); 112 | } else if (to.isMemory()) { 113 | masm.load32(getAdjustedAddress(from), tempReg()); 114 | masm.store32(tempReg(), getAdjustedAddress(to)); 115 | } else { 116 | MOZ_CRASH("Invalid emitInt32Move arguments."); 117 | } 118 | } else if (from.isEffectiveAddress()) { 119 | if (to.isGeneralReg()) { 120 | masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg()); 121 | } else if (to.isMemory()) { 122 | masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg()); 123 | masm.store32(tempReg(), getAdjustedAddress(to)); 124 | } else { 125 | MOZ_CRASH("Invalid emitInt32Move arguments."); 126 | } 127 | } else { 128 | MOZ_CRASH("Invalid emitInt32Move arguments."); 129 | } 130 | } 131 | 132 | void 133 | MoveEmitterMIPSShared::emitFloat32Move(const MoveOperand& from, const MoveOperand& to) 134 | { 135 | // Ensure that we can use ScratchFloat32Reg in memory move. 
136 | MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloat32Reg); 137 | MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloat32Reg); 138 | 139 | if (from.isFloatReg()) { 140 | if (to.isFloatReg()) { 141 | masm.moveFloat32(from.floatReg(), to.floatReg()); 142 | } else if (to.isGeneralReg()) { 143 | // This should only be used when passing float parameter in a1,a2,a3 144 | MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3); 145 | masm.moveFromFloat32(from.floatReg(), to.reg()); 146 | } else { 147 | MOZ_ASSERT(to.isMemory()); 148 | masm.storeFloat32(from.floatReg(), getAdjustedAddress(to)); 149 | } 150 | } else if (to.isFloatReg()) { 151 | MOZ_ASSERT(from.isMemory()); 152 | masm.loadFloat32(getAdjustedAddress(from), to.floatReg()); 153 | } else if (to.isGeneralReg()) { 154 | MOZ_ASSERT(from.isMemory()); 155 | // This should only be used when passing float parameter in a1,a2,a3 156 | MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3); 157 | masm.loadPtr(getAdjustedAddress(from), to.reg()); 158 | } else { 159 | MOZ_ASSERT(from.isMemory()); 160 | MOZ_ASSERT(to.isMemory()); 161 | masm.loadFloat32(getAdjustedAddress(from), ScratchFloat32Reg); 162 | masm.storeFloat32(ScratchFloat32Reg, getAdjustedAddress(to)); 163 | } 164 | } 165 | 166 | void 167 | MoveEmitterMIPSShared::emit(const MoveOp& move) 168 | { 169 | const MoveOperand& from = move.from(); 170 | const MoveOperand& to = move.to(); 171 | 172 | if (move.isCycleEnd() && move.isCycleBegin()) { 173 | // A fun consequence of aliased registers is you can have multiple 174 | // cycles at once, and one can end exactly where another begins. 
175 | breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot()); 176 | completeCycle(from, to, move.type(), move.cycleEndSlot()); 177 | return; 178 | } 179 | 180 | if (move.isCycleEnd()) { 181 | MOZ_ASSERT(inCycle_); 182 | completeCycle(from, to, move.type(), move.cycleEndSlot()); 183 | MOZ_ASSERT(inCycle_ > 0); 184 | inCycle_--; 185 | return; 186 | } 187 | 188 | if (move.isCycleBegin()) { 189 | breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot()); 190 | inCycle_++; 191 | } 192 | 193 | switch (move.type()) { 194 | case MoveOp::FLOAT32: 195 | emitFloat32Move(from, to); 196 | break; 197 | case MoveOp::DOUBLE: 198 | emitDoubleMove(from, to); 199 | break; 200 | case MoveOp::INT32: 201 | emitInt32Move(from, to); 202 | break; 203 | case MoveOp::GENERAL: 204 | emitMove(from, to); 205 | break; 206 | default: 207 | MOZ_CRASH("Unexpected move type"); 208 | } 209 | } 210 | 211 | void 212 | MoveEmitterMIPSShared::assertDone() 213 | { 214 | MOZ_ASSERT(inCycle_ == 0); 215 | } 216 | 217 | void 218 | MoveEmitterMIPSShared::finish() 219 | { 220 | assertDone(); 221 | 222 | masm.freeStack(masm.framePushed() - pushedAtStart_); 223 | } 224 | -------------------------------------------------------------------------------- /MoveEmitter-mips-shared.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
 */

#ifndef jit_mips_shared_MoveEmitter_mips_shared_h
#define jit_mips_shared_MoveEmitter_mips_shared_h

#include "jit/MacroAssembler.h"
#include "jit/MoveResolver.h"

namespace js {
namespace jit {

// Code generator backend that turns a resolved MoveResolver graph into
// machine moves. Double moves and cycle break/complete are per-subclass
// (MIPS32 vs. MIPS64) because they depend on register width.
class MoveEmitterMIPSShared
{
  protected:
    uint32_t inCycle_;
    MacroAssembler& masm;

    // Original stack push value.
    uint32_t pushedAtStart_;

    // These store stack offsets to spill locations, snapshotting
    // codegen->framePushed_ at the time they were allocated. They are -1 if no
    // stack space has been allocated for that particular spill.
    int32_t pushedAtCycle_;
    int32_t pushedAtSpill_;

    // These are registers that are available for temporary use. They may be
    // assigned InvalidReg. If no corresponding spill space has been assigned,
    // then these registers do not need to be spilled.
    Register spilledReg_;
    FloatRegister spilledFloatReg_;

    void assertDone();
    Register tempReg();
    FloatRegister tempFloatReg();
    Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;
    int32_t getAdjustedOffset(const MoveOperand& operand);
    Address getAdjustedAddress(const MoveOperand& operand);

    void emitMove(const MoveOperand& from, const MoveOperand& to);
    void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
    void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
    virtual void emitDoubleMove(const MoveOperand& from, const MoveOperand& to) = 0;
    virtual void breakCycle(const MoveOperand& from, const MoveOperand& to,
                            MoveOp::Type type, uint32_t slot) = 0;
    virtual void completeCycle(const MoveOperand& from, const MoveOperand& to,
                               MoveOp::Type type, uint32_t slot) = 0;
    void emit(const MoveOp& move);

  public:
    MoveEmitterMIPSShared(MacroAssembler& masm)
      : inCycle_(0),
        masm(masm),
        pushedAtStart_(masm.framePushed()),
        pushedAtCycle_(-1),
        pushedAtSpill_(-1),
        spilledReg_(InvalidReg),
        spilledFloatReg_(InvalidFloatReg)
    { }
    ~MoveEmitterMIPSShared() {
        assertDone();
    }
    void emit(const MoveResolver& moves);
    void finish();

    // MIPS uses fixed scratch registers; this hook exists for platforms
    // that need one injected.
    void setScratchRegister(Register reg) {}
};

} // namespace jit
} // namespace js

#endif /* jit_mips_shared_MoveEmitter_mips_shared_h */

// --------------------------------------------------------------------------
// MoveEmitter-mips64.cpp
// --------------------------------------------------------------------------

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips64/MoveEmitter-mips64.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;

void
MoveEmitterMIPS64::breakCycle(const MoveOperand& from, const MoveOperand& to,
                              MoveOp::Type type, uint32_t slotId)
{
    // There is some pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
    switch (type) {
      case MoveOp::FLOAT32:
        // Spill the destination's current float32 value into this cycle's
        // stack slot, via the FP scratch register if it lives in memory.
        if (to.isMemory()) {
            FloatRegister temp = ScratchFloat32Reg;
            masm.loadFloat32(getAdjustedAddress(to), temp);
            masm.storeFloat32(temp, cycleSlot(slotId));
        } else {
            masm.storeFloat32(to.floatReg(), cycleSlot(slotId));
        }
        break;
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            FloatRegister temp = ScratchDoubleReg;
            masm.loadDouble(getAdjustedAddress(to), temp);
            masm.storeDouble(temp, cycleSlot(slotId));
        } else {
            masm.storeDouble(to.floatReg(), cycleSlot(slotId));
        }
        break;
      case MoveOp::INT32:
        // Integer cycles always use slot 0 (completeCycle asserts this).
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.load32(getAdjustedAddress(to), temp);
            masm.store32(temp, cycleSlot(0));
        } else {
            // Second scratch register should not be moved by MoveEmitter.
            MOZ_ASSERT(to.reg() != spilledReg_);
            masm.store32(to.reg(), cycleSlot(0));
        }
        break;
      case MoveOp::GENERAL:
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.loadPtr(getAdjustedAddress(to), temp);
            masm.storePtr(temp, cycleSlot(0));
        } else {
            // Second scratch register should not be moved by MoveEmitter.
            MOZ_ASSERT(to.reg() != spilledReg_);
            masm.storePtr(to.reg(), cycleSlot(0));
        }
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}

void
MoveEmitterMIPS64::completeCycle(const MoveOperand& from, const MoveOperand& to,
                                 MoveOp::Type type, uint32_t slotId)
{
    // There is some pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (B -> A), which we reach last. We emit a move from
    // the saved value of B, to A.
    switch (type) {
      case MoveOp::FLOAT32:
        if (to.isMemory()) {
            FloatRegister temp = ScratchFloat32Reg;
            masm.loadFloat32(cycleSlot(slotId), temp);
            masm.storeFloat32(temp, getAdjustedAddress(to));
        } else {
            masm.loadFloat32(cycleSlot(slotId), to.floatReg());
        }
        break;
      case MoveOp::DOUBLE:
        if (to.isMemory()) {
            FloatRegister temp = ScratchDoubleReg;
            masm.loadDouble(cycleSlot(slotId), temp);
            masm.storeDouble(temp, getAdjustedAddress(to));
        } else {
            masm.loadDouble(cycleSlot(slotId), to.floatReg());
        }
        break;
      case MoveOp::INT32:
        MOZ_ASSERT(slotId == 0);
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.load32(cycleSlot(0), temp);
            masm.store32(temp, getAdjustedAddress(to));
        } else {
            // Second scratch register should not be moved by MoveEmitter.
            MOZ_ASSERT(to.reg() != spilledReg_);
            masm.load32(cycleSlot(0), to.reg());
        }
        break;
      case MoveOp::GENERAL:
        MOZ_ASSERT(slotId == 0);
        if (to.isMemory()) {
            Register temp = tempReg();
            masm.loadPtr(cycleSlot(0), temp);
            masm.storePtr(temp, getAdjustedAddress(to));
        } else {
            // Second scratch register should not be moved by MoveEmitter.
            MOZ_ASSERT(to.reg() != spilledReg_);
            masm.loadPtr(cycleSlot(0), to.reg());
        }
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}

// Double-precision move between FP registers, GPRs (whole 64-bit transfer)
// and memory; memory-to-memory goes through ScratchDoubleReg.
void
MoveEmitterMIPS64::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
{
    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveDouble(from.floatReg(), to.floatReg());
        } else if (to.isGeneralReg()) {
            masm.moveFromDouble(from.floatReg(), to.reg());
        } else {
            MOZ_ASSERT(to.isMemory());
            masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
        }
    } else if (to.isFloatReg()) {
        if (from.isMemory())
            masm.loadDouble(getAdjustedAddress(from), to.floatReg());
        else
            masm.moveToDouble(from.reg(), to.floatReg());
    } else {
        MOZ_ASSERT(from.isMemory());
        MOZ_ASSERT(to.isMemory());
        masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
        masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
    }
}

// --------------------------------------------------------------------------
// MoveEmitter-mips64.h
// --------------------------------------------------------------------------

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

#ifndef jit_mips64_MoveEmitter_mips64_h
#define jit_mips64_MoveEmitter_mips64_h

#include "jit/mips-shared/MoveEmitter-mips-shared.h"

namespace js {
namespace jit {

// MIPS64-specific move emitter. The shared MIPS emitter provides the driver
// loop; this subclass supplies the 64-bit flavours of double moves and of
// cycle break/complete handling (see MoveEmitter-mips64.cpp).
class MoveEmitterMIPS64 : public MoveEmitterMIPSShared
{
    void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
    void breakCycle(const MoveOperand& from, const MoveOperand& to,
                    MoveOp::Type type, uint32_t slot);
    void completeCycle(const MoveOperand& from, const MoveOperand& to,
                       MoveOp::Type type, uint32_t slot);

  public:
    MoveEmitterMIPS64(MacroAssembler& masm)
      : MoveEmitterMIPSShared(masm)
    { }
};

// Alias used by architecture-independent code.
typedef MoveEmitterMIPS64 MoveEmitter;

} // namespace jit
} // namespace js

#endif /* jit_mips64_MoveEmitter_mips64_h */
--------------------------------------------------------------------------------
/MoveEmitter-ppc64le.cpp:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ 6 | 7 | #include "jit/ppc64le/MoveEmitter-ppc64le.h" 8 | 9 | #include "jit/MacroAssembler-inl.h" 10 | 11 | using namespace js; 12 | using namespace js::jit; 13 | 14 | void 15 | MoveEmitterPPC64LE::emit(const MoveResolver& moves) 16 | { 17 | if (moves.numCycles()) { 18 | // Reserve stack for cycle resolution 19 | masm.reserveStack(moves.numCycles() * sizeof(double)); 20 | pushedAtCycle_ = masm.framePushed(); 21 | } 22 | 23 | for (size_t i = 0; i < moves.numMoves(); i++) 24 | emit(moves.getMove(i)); 25 | } 26 | 27 | Address 28 | MoveEmitterPPC64LE::cycleSlot(uint32_t slot, uint32_t subslot) const 29 | { 30 | int32_t offset = masm.framePushed() - pushedAtCycle_; 31 | MOZ_ASSERT(Imm16::IsInSignedRange(offset)); 32 | return Address(StackPointer, offset + slot * sizeof(double) + subslot); 33 | } 34 | 35 | int32_t 36 | MoveEmitterPPC64LE::getAdjustedOffset(const MoveOperand& operand) 37 | { 38 | MOZ_ASSERT(operand.isMemoryOrEffectiveAddress()); 39 | if (operand.base() != StackPointer) 40 | return operand.disp(); 41 | 42 | // Adjust offset if stack pointer has been moved. 43 | return operand.disp() + masm.framePushed() - pushedAtStart_; 44 | } 45 | 46 | Address 47 | MoveEmitterPPC64LE::getAdjustedAddress(const MoveOperand& operand) 48 | { 49 | return Address(operand.base(), getAdjustedOffset(operand)); 50 | } 51 | 52 | 53 | Register 54 | MoveEmitterPPC64LE::tempReg() 55 | { 56 | spilledReg_ = SecondScratchReg; 57 | return SecondScratchReg; 58 | } 59 | 60 | void 61 | MoveEmitterPPC64LE::emitMove(const MoveOperand& from, const MoveOperand& to) 62 | { 63 | if (from.isGeneralReg()) { 64 | // Second scratch register should not be moved by MoveEmitter. 
        MOZ_ASSERT(from.reg() != spilledReg_);

        if (to.isGeneralReg())
            masm.movePtr(from.reg(), to.reg());
        else if (to.isMemory())
            masm.storePtr(from.reg(), getAdjustedAddress(to));
        else
            MOZ_CRASH("Invalid emitMove arguments.");
    } else if (from.isMemory()) {
        if (to.isGeneralReg()) {
            masm.loadPtr(getAdjustedAddress(from), to.reg());
        } else if (to.isMemory()) {
            // Memory-to-memory: bounce through the temp register.
            masm.loadPtr(getAdjustedAddress(from), tempReg());
            masm.storePtr(tempReg(), getAdjustedAddress(to));
        } else {
            MOZ_CRASH("Invalid emitMove arguments.");
        }
    } else if (from.isEffectiveAddress()) {
        // Source is an address computation (base + disp), not a load.
        if (to.isGeneralReg()) {
            masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
        } else if (to.isMemory()) {
            masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
            masm.storePtr(tempReg(), getAdjustedAddress(to));
        } else {
            MOZ_CRASH("Invalid emitMove arguments.");
        }
    } else {
        MOZ_CRASH("Invalid emitMove arguments.");
    }
}

// Same structure as emitMove(), but with 32-bit loads/stores/moves.
void
MoveEmitterPPC64LE::emitInt32Move(const MoveOperand &from, const MoveOperand &to)
{
    if (from.isGeneralReg()) {
        // Second scratch register should not be moved by MoveEmitter.
        MOZ_ASSERT(from.reg() != spilledReg_);

        if (to.isGeneralReg())
            masm.move32(from.reg(), to.reg());
        else if (to.isMemory())
            masm.store32(from.reg(), getAdjustedAddress(to));
        else
            MOZ_CRASH("Invalid emitInt32Move arguments.");
    } else if (from.isMemory()) {
        if (to.isGeneralReg()) {
            masm.load32(getAdjustedAddress(from), to.reg());
        } else if (to.isMemory()) {
            // Memory-to-memory: bounce through the temp register.
            masm.load32(getAdjustedAddress(from), tempReg());
            masm.store32(tempReg(), getAdjustedAddress(to));
        } else {
            MOZ_CRASH("Invalid emitInt32Move arguments.");
        }
    } else if (from.isEffectiveAddress()) {
        if (to.isGeneralReg()) {
            masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
        } else if (to.isMemory()) {
            // NOTE(review): only the low 32 bits of the computed address are
            // stored here -- confirm callers never request a full pointer via
            // an INT32-typed move.
            masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
            masm.store32(tempReg(), getAdjustedAddress(to));
        } else {
            MOZ_CRASH("Invalid emitInt32Move arguments.");
        }
    } else {
        MOZ_CRASH("Invalid emitInt32Move arguments.");
    }
}

void
MoveEmitterPPC64LE::emitFloat32Move(const MoveOperand& from, const MoveOperand& to)
{
    // Don't clobber the temp register if it's the source.
    MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloat32Reg);
    MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloat32Reg);

    // Direct FPR-GPR moves are unpossible on Power and it shouldn't happen anyway.
    if (from.isFloatReg()) {
        if (to.isFloatReg()) {
            masm.moveFloat32(from.floatReg(), to.floatReg());
        } else if (to.isGeneralReg()) {
            // Don't bother handling this case.
145 | MOZ_CRASH("emitFloat32Move -> GPR not allowed on PowerPC"); 146 | } else { 147 | MOZ_ASSERT(to.isMemory()); 148 | masm.storeFloat32(from.floatReg(), getAdjustedAddress(to)); 149 | } 150 | } else if (to.isFloatReg()) { 151 | MOZ_ASSERT(from.isMemory()); 152 | masm.loadFloat32(getAdjustedAddress(from), to.floatReg()); 153 | } else if (to.isGeneralReg()) { 154 | MOZ_ASSERT(from.isMemory()); 155 | masm.loadPtr(getAdjustedAddress(from), to.reg()); 156 | } else { 157 | MOZ_ASSERT(from.isMemory()); 158 | MOZ_ASSERT(to.isMemory()); 159 | masm.loadFloat32(getAdjustedAddress(from), ScratchFloat32Reg); 160 | masm.storeFloat32(ScratchFloat32Reg, getAdjustedAddress(to)); 161 | } 162 | } 163 | 164 | // This is almost the same. 165 | void 166 | MoveEmitterPPC64LE::emitDoubleMove(const MoveOperand& from, const MoveOperand& to) 167 | { 168 | // Don't clobber the temp register if it's the source. 169 | MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloat32Reg); 170 | MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloat32Reg); 171 | 172 | if (from.isFloatReg()) { 173 | if (to.isFloatReg()) { 174 | masm.moveDouble(from.floatReg(), to.floatReg()); 175 | } else if (to.isGeneralReg()) { 176 | // Maximum bogosity again, dude. 177 | MOZ_CRASH("emitDoubleMove FPR -> GPR not allowed on PowerPC"); 178 | } else { 179 | MOZ_ASSERT(to.isMemory()); 180 | masm.storeDouble(from.floatReg(), getAdjustedAddress(to)); 181 | } 182 | } else if (to.isFloatReg()) { 183 | MOZ_ASSERT(from.isMemory()); 184 | masm.loadDouble(getAdjustedAddress(from), to.floatReg()); 185 | } else if (to.isGeneralReg()) { 186 | // Not handled (yet?) 
187 | MOZ_CRASH("emitDoubleMove mem -> GPR not allowed on PowerPC"); 188 | } else { 189 | MOZ_ASSERT(from.isMemory()); 190 | MOZ_ASSERT(to.isMemory()); 191 | masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg); 192 | masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to)); 193 | } 194 | } 195 | 196 | void 197 | MoveEmitterPPC64LE::breakCycle(const MoveOperand& from, const MoveOperand& to, 198 | MoveOp::Type type, uint32_t slotId) 199 | { 200 | // There is some pattern: 201 | // (A -> B) 202 | // (B -> A) 203 | // 204 | // This case handles (A -> B), which we reach first. We save B, then allow 205 | // the original move to continue. 206 | switch (type) { 207 | case MoveOp::FLOAT32: 208 | if (to.isMemory()) { 209 | FloatRegister temp = ScratchFloat32Reg; 210 | masm.loadFloat32(getAdjustedAddress(to), temp); 211 | masm.storeFloat32(temp, cycleSlot(slotId)); 212 | } else { 213 | masm.storeFloat32(to.floatReg(), cycleSlot(slotId)); 214 | } 215 | break; 216 | case MoveOp::DOUBLE: 217 | if (to.isMemory()) { 218 | FloatRegister temp = ScratchDoubleReg; 219 | masm.loadDouble(getAdjustedAddress(to), temp); 220 | masm.storeDouble(temp, cycleSlot(slotId)); 221 | } else { 222 | masm.storeDouble(to.floatReg(), cycleSlot(slotId)); 223 | } 224 | break; 225 | case MoveOp::INT32: 226 | if (to.isMemory()) { 227 | Register temp = tempReg(); 228 | masm.load32(getAdjustedAddress(to), temp); 229 | masm.store32(temp, cycleSlot(0)); 230 | } else { 231 | // Second scratch register should not be moved by MoveEmitter. 232 | MOZ_ASSERT(to.reg() != spilledReg_); 233 | masm.store32(to.reg(), cycleSlot(0)); 234 | } 235 | break; 236 | case MoveOp::GENERAL: 237 | if (to.isMemory()) { 238 | Register temp = tempReg(); 239 | masm.loadPtr(getAdjustedAddress(to), temp); 240 | masm.storePtr(temp, cycleSlot(0)); 241 | } else { 242 | // Second scratch register should not be moved by MoveEmitter. 
243 | MOZ_ASSERT(to.reg() != spilledReg_); 244 | masm.storePtr(to.reg(), cycleSlot(0)); 245 | } 246 | break; 247 | default: 248 | MOZ_CRASH("Unexpected move type"); 249 | } 250 | } 251 | 252 | void 253 | MoveEmitterPPC64LE::completeCycle(const MoveOperand& from, const MoveOperand& to, 254 | MoveOp::Type type, uint32_t slotId) 255 | { 256 | // There is some pattern: 257 | // (A -> B) 258 | // (B -> A) 259 | // 260 | // This case handles (B -> A), which we reach last. We emit a move from the 261 | // saved value of B, to A. 262 | switch (type) { 263 | case MoveOp::FLOAT32: 264 | if (to.isMemory()) { 265 | FloatRegister temp = ScratchFloat32Reg; 266 | masm.loadFloat32(cycleSlot(slotId), temp); 267 | masm.storeFloat32(temp, getAdjustedAddress(to)); 268 | } else { 269 | masm.loadFloat32(cycleSlot(slotId), to.floatReg()); 270 | } 271 | break; 272 | case MoveOp::DOUBLE: 273 | if (to.isMemory()) { 274 | FloatRegister temp = ScratchDoubleReg; 275 | masm.loadDouble(cycleSlot(slotId), temp); 276 | masm.storeDouble(temp, getAdjustedAddress(to)); 277 | } else { 278 | masm.loadDouble(cycleSlot(slotId), to.floatReg()); 279 | } 280 | break; 281 | case MoveOp::INT32: 282 | MOZ_ASSERT(slotId == 0); 283 | if (to.isMemory()) { 284 | Register temp = tempReg(); 285 | masm.load32(cycleSlot(0), temp); 286 | masm.store32(temp, getAdjustedAddress(to)); 287 | } else { 288 | // Second scratch register should not be moved by MoveEmitter. 289 | MOZ_ASSERT(to.reg() != spilledReg_); 290 | masm.load32(cycleSlot(0), to.reg()); 291 | } 292 | break; 293 | case MoveOp::GENERAL: 294 | MOZ_ASSERT(slotId == 0); 295 | if (to.isMemory()) { 296 | Register temp = tempReg(); 297 | masm.loadPtr(cycleSlot(0), temp); 298 | masm.storePtr(temp, getAdjustedAddress(to)); 299 | } else { 300 | // Second scratch register should not be moved by MoveEmitter. 
            MOZ_ASSERT(to.reg() != spilledReg_);
            masm.loadPtr(cycleSlot(0), to.reg());
        }
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}

// Emit a single move, handling cycle begin/end bookkeeping and dispatching
// on the operand type.
void
MoveEmitterPPC64LE::emit(const MoveOp& move)
{
    const MoveOperand& from = move.from();
    const MoveOperand& to = move.to();

    if (move.isCycleEnd() && move.isCycleBegin()) {
        // A fun consequence of aliased registers is you can have multiple
        // cycles at once, and one can end exactly where another begins.
        breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
        completeCycle(from, to, move.type(), move.cycleEndSlot());
        return;
    }

    if (move.isCycleEnd()) {
        MOZ_ASSERT(inCycle_);
        completeCycle(from, to, move.type(), move.cycleEndSlot());
        MOZ_ASSERT(inCycle_ > 0);
        inCycle_--;
        return;
    }

    if (move.isCycleBegin()) {
        breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
        inCycle_++;
    }

    switch (move.type()) {
      case MoveOp::FLOAT32:
        emitFloat32Move(from, to);
        break;
      case MoveOp::DOUBLE:
        emitDoubleMove(from, to);
        break;
      case MoveOp::INT32:
        emitInt32Move(from, to);
        break;
      case MoveOp::GENERAL:
        emitMove(from, to);
        break;
      default:
        MOZ_CRASH("Unexpected move type");
    }
}

void
MoveEmitterPPC64LE::assertDone()
{
    // All cycles must have been completed before the emitter is finished.
    MOZ_ASSERT(inCycle_ == 0);
}

void
MoveEmitterPPC64LE::finish()
{
    assertDone();

    // Release any cycle-resolution stack reserved in emit(const MoveResolver&).
    masm.freeStack(masm.framePushed() - pushedAtStart_);
}
--------------------------------------------------------------------------------
/MoveEmitter-ppc64le.h:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4
tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_ppc64le_MoveEmitter_ppc64le_h
#define jit_ppc64le_MoveEmitter_ppc64le_h

#include "jit/MacroAssembler.h"
#include "jit/MoveResolver.h"

namespace js {
namespace jit {

// Code generator for parallel moves on PPC64LE. Unlike the MIPS ports there
// is no shared base class; this emitter is self-contained.
class MoveEmitterPPC64LE
{
  protected:
    // Number of move cycles currently open (begun but not yet completed).
    uint32_t inCycle_;
    MacroAssembler& masm;

    // Original stack push value.
    uint32_t pushedAtStart_;

    // These store stack offsets to spill locations, snapshotting
    // codegen->framePushed_ at the time they were allocated. They are -1 if no
    // stack space has been allocated for that particular spill.
    int32_t pushedAtCycle_;
    int32_t pushedAtSpill_;

    // These are registers that are available for temporary use. They may be
    // assigned InvalidReg. If no corresponding spill space has been assigned,
    // then these registers do not need to be spilled.
    Register spilledReg_;
    FloatRegister spilledFloatReg_;

    void assertDone();
    Register tempReg();
    FloatRegister tempFloatReg();
    Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;
    int32_t getAdjustedOffset(const MoveOperand& operand);
    Address getAdjustedAddress(const MoveOperand& operand);

    void emitMove(const MoveOperand& from, const MoveOperand& to);
    void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
    void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
    void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
    void breakCycle(const MoveOperand& from, const MoveOperand& to,
                    MoveOp::Type type, uint32_t slot);
    void completeCycle(const MoveOperand& from, const MoveOperand& to,
                       MoveOp::Type type, uint32_t slot);
    void emit(const MoveOp& move);

  public:
    MoveEmitterPPC64LE(MacroAssembler& masm)
      : inCycle_(0),
        masm(masm),
        pushedAtStart_(masm.framePushed()),
        pushedAtCycle_(-1),
        pushedAtSpill_(-1),
        spilledReg_(InvalidReg),
        spilledFloatReg_(InvalidFloatReg)
    { }
    ~MoveEmitterPPC64LE() {
        assertDone();
    }
    void emit(const MoveResolver& moves);
    void finish();

    // No-op: this emitter always uses SecondScratchReg as its temporary.
    void setScratchRegister(Register reg) {}
};

// Alias used by architecture-independent code.
typedef MoveEmitterPPC64LE MoveEmitter;

} // namespace jit
} // namespace js

#endif /* jit_ppc64le_MoveEmitter_ppc64le_h */
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# JITPOWER

This is a temporary staging ground for work on the Firefox ppc64 JIT. Once the MVP has been completed, the intention is to add it to the Firefox tree for maintenance as part of Firefox and this repo will eventually be removed.
4 | 5 | ## MVP Definition 6 | 7 | The Minimum Viable Product is a POWER9 little-endian JIT with Wasm and without SIMD running with the 64-bit ELFv2 ABI and a 64K page size. There is certainly interest and value in big-endian and additional 64-bit PowerPC support (potentially all the way down to the 970/G5), but these can be grafted on after the fact and the SpiderMonkey JIT core is known to have endian problems which need fixing separately (see TenFourFox for more on that). Additionally, POWER9 is a viable desktop system thanks to the Raptor Talos family which developers are likely to already be using, and the new instructions in ISA 3.0 make initial porting substantially simpler on POWER9. 8 | 9 | SIMD is a separate issue which will not be addressed during the MVP period, and isn't necessary to get the JIT off the ground on any platform. 10 | 11 | ## Work Phases 12 | 13 | ### Required For MVP 14 | 15 | 1. Create a first draft JIT using the existing MIPS64 LE sources from Fx62 as a scaffold. Fx62 was chosen simply because it was the version when the port was originally commenced; there's nothing special about it otherwise other than to eliminate churn with constantly integrating new changes. This step is already **in progress**. 16 | 1. Ensure the first draft JIT compiles. A temporary Fx62 tree will probably be uploaded to facilitate this. 17 | 1. Ensure the first draft JIT links (add any missing functions). 18 | 1. Ensure the first draft JIT passes all tests in Fx62 (all tests in `js/src/jit-test`, `js/src/tests` and `jsapi-tests`). 19 | 1. Forward port the first draft JIT to `mozilla-central` as the second draft JIT. 20 | 1. Ensure the second draft JIT compiles. 21 | 1. Ensure the second draft JIT links. 22 | 1. Repair any new test failures. 23 | 1. Submit to Bugzilla in separate patch sets (patches needed against the JIT core, and new files). 24 | 25 | ### In Scope But Not Required For MVP 26 | 27 | 1. Simulator support to allow automated testing. 
1. Simulator support to allow automated testing.

### Not In Scope For MVP (but desirable)

Roughly in order of priority:

1. Support for POWER8 and earlier.
1. BE support.
1. Support for non-64K page sizes.
1. SIMD with VMX/VSX.

Please don't submit pull requests for these yet unless they are trivial.

## Phase 1: What's Done So Far (And Might Even Work)

Some of this code originates from TenFourFox's IonPower JIT.

- Ion code generator (`CodeGenerator*`)
- Baseline code generator (`BaselineCompiler*`)
- Baseline and shared inline cache code generators (`BaselineIC*`, `SharedIC*`)
- LIR (intermediate representation) design (`LIR*.h`)
- Basic lowering/strength reduction (`Lowering*`)
- Bailouts (`Bailouts*`)
- Move code generator (`MoveEmitter*`)
- Trampoline code generator (`Trampoline*.cpp`)

What's remaining to do:

- Check my work, because I'm an idiot
- Define `Architecture` and exact internal JIT ABI usage, including temporary registers; see [the ABI documentation](http://openpowerfoundation.org/wp-content/uploads/resources/leabi/content/ch_preface.html)
- Current plan is to use `r0` and `r12` as temporary registers (regenerating `r12` when making PLT-like calls back to PPC64 ABI-compliant code through `mtctr` `bctr`); `r12` or some other non-`r0` register needed as an "address" holder
- Try to enable use of as many GPRs and FPRs as possible
- Don't define SPRs other than `lr` outside of our JIT backend since SpiderMonkey doesn't understand the concept
- No vector registers for the MVP
- Create `MacroAssembler` (`ma_*` functions) based on those used by the code generators
- Create `Assembler` (`as_*` [native instructions] and `xs_*` [commonly accepted alternative mnemonics] functions) based on instructions issued by the code generators and the macro assembler

## Feel free to start on what you like

The original MIPS code from
which the new source was cribbed and rewritten is provided. Don't remove these just in case we need to refer back to them for the original semantics.

All code is MPL 2.0 and you agree that by submitting a pull request, you automatically grant use of your work under that license.
--------------------------------------------------------------------------------
/SharedIC-mips64.cpp:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/BaselineCompiler.h"
#include "jit/BaselineIC.h"
#include "jit/BaselineJIT.h"
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"
#include "vm/Iteration.h"

#include "builtin/Boolean-inl.h"

using namespace js;
using namespace js::jit;

namespace js {
namespace jit {

// ICBinaryArith_Int32
//
// Baseline IC stub for int32 binary arithmetic. Kept as the MIPS64 reference
// implementation for the PPC64 port (see README); bails to the next stub via
// `failure` whenever the result cannot be represented as an int32.

bool
ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
{
    // Guard that R0 is an integer and R1 is an integer.
    Label failure;
    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
    masm.branchTestInt32(Assembler::NotEqual, R1, &failure);

    // Add R0 and R1. Don't need to explicitly unbox, just use R2's valueReg.
    Register scratchReg = R2.valueReg();

    Label goodMul, divTest1, divTest2;
    switch(op_) {
      case JSOP_ADD:
        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ExtractTemp1);
        masm.ma_addTestOverflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure);
        masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg());
        break;
      case JSOP_SUB:
        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ExtractTemp1);
        masm.ma_subTestOverflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure);
        masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg());
        break;
      case JSOP_MUL: {
        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ExtractTemp1);
        masm.ma_mul_branch_overflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure);

        masm.ma_b(scratchReg, Imm32(0), &goodMul, Assembler::NotEqual, ShortJump);

        // Result is -0 if operands have different signs.
        masm.as_xor(t8, ExtractTemp0, ExtractTemp1);
        masm.ma_b(t8, Imm32(0), &failure, Assembler::LessThan, ShortJump);

        masm.bind(&goodMul);
        masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg());
        break;
      }
      case JSOP_DIV:
      case JSOP_MOD: {
        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ExtractTemp1);
        // Check for INT_MIN / -1, it results in a double.
        masm.ma_b(ExtractTemp0, Imm32(INT_MIN), &divTest1, Assembler::NotEqual, ShortJump);
        masm.ma_b(ExtractTemp1, Imm32(-1), &failure, Assembler::Equal, ShortJump);
        masm.bind(&divTest1);

        // Check for division by zero
        masm.ma_b(ExtractTemp1, Imm32(0), &failure, Assembler::Equal, ShortJump);

        // Check for 0 / X with X < 0 (results in -0).
        masm.ma_b(ExtractTemp0, Imm32(0), &divTest2, Assembler::NotEqual, ShortJump);
        masm.ma_b(ExtractTemp1, Imm32(0), &failure, Assembler::LessThan, ShortJump);
        masm.bind(&divTest2);

        // MIPS div leaves the quotient in LO and the remainder in HI.
        masm.as_div(ExtractTemp0, ExtractTemp1);

        if (op_ == JSOP_DIV) {
            // Result is a double if the remainder != 0.
            masm.as_mfhi(scratchReg);
            masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::NotEqual, ShortJump);
            masm.as_mflo(scratchReg);
            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        } else {
            Label done;
            // If X % Y == 0 and X < 0, the result is -0.
            masm.as_mfhi(scratchReg);
            masm.ma_b(scratchReg, Imm32(0), &done, Assembler::NotEqual, ShortJump);
            masm.ma_b(ExtractTemp0, Imm32(0), &failure, Assembler::LessThan, ShortJump);
            masm.bind(&done);
            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        }
        break;
      }
      case JSOP_BITOR:
        masm.as_or(R0.valueReg(), R0.valueReg(), R1.valueReg());
        break;
      case JSOP_BITXOR:
        masm.as_xor(scratchReg, R0.valueReg(), R1.valueReg());
        masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        break;
      case JSOP_BITAND:
        masm.as_and(R0.valueReg(), R0.valueReg(), R1.valueReg());
        break;
      case JSOP_LSH:
        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ExtractTemp1);
        // MIPS will only use 5 lowest bits in R1 as shift offset.
        masm.ma_sll(scratchReg, ExtractTemp0, ExtractTemp1);
        masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        break;
      case JSOP_RSH:
        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ExtractTemp1);
        masm.ma_sra(scratchReg, ExtractTemp0, ExtractTemp1);
        masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        break;
      case JSOP_URSH:
        masm.unboxInt32(R0, ExtractTemp0);
        masm.unboxInt32(R1, ExtractTemp1);
        masm.ma_srl(scratchReg, ExtractTemp0, ExtractTemp1);
        // An unsigned result with the high bit set does not fit in an int32.
        if (allowDouble_) {
            Label toUint;
            masm.ma_b(scratchReg, Imm32(0), &toUint, Assembler::LessThan, ShortJump);

            // Move result and box for return.
            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
            EmitReturnFromIC(masm);

            masm.bind(&toUint);
            masm.convertUInt32ToDouble(scratchReg, FloatReg1);
            masm.boxDouble(FloatReg1, R0, ScratchDoubleReg);
        } else {
            masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::LessThan, ShortJump);
            // Move result for return.
            masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0);
        }
        break;
      default:
        MOZ_CRASH("Unhandled op for BinaryArith_Int32.");
    }

    EmitReturnFromIC(masm);

    // Failure case - jump to next stub
    masm.bind(&failure);
    EmitStubGuardFailure(masm);

    return true;
}

} // namespace jit
} // namespace js
--------------------------------------------------------------------------------
/SharedIC-ppc64le.cpp:
--------------------------------------------------------------------------------
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ 6 | 7 | #include "jit/BaselineCompiler.h" 8 | #include "jit/BaselineIC.h" 9 | #include "jit/BaselineJIT.h" 10 | #include "jit/Linker.h" 11 | #include "jit/SharedICHelpers.h" 12 | #include "vm/Iteration.h" 13 | 14 | #include "builtin/Boolean-inl.h" 15 | 16 | using namespace js; 17 | using namespace js::jit; 18 | 19 | namespace js { 20 | namespace jit { 21 | 22 | // ICBinaryArith_Int32 23 | 24 | bool 25 | ICBinaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm) 26 | { 27 | // Guard that R0 is an integer and R1 is an integer. 28 | Label failure; 29 | masm.branchTestInt32(Assembler::NotEqual, R0, &failure); 30 | masm.branchTestInt32(Assembler::NotEqual, R1, &failure); 31 | 32 | // Add R0 and R1. Don't need to explicitly unbox, just use R2's valueReg. 33 | Register scratchReg = R2.valueReg(); 34 | 35 | Label goodMul, divTest1, divTest2; 36 | switch(op_) { 37 | case JSOP_ADD: 38 | masm.unboxInt32(R0, ExtractTemp0); 39 | masm.unboxInt32(R1, ExtractTemp1); 40 | masm.ma_addTestOverflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure); 41 | masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg()); 42 | break; 43 | case JSOP_SUB: 44 | masm.unboxInt32(R0, ExtractTemp0); 45 | masm.unboxInt32(R1, ExtractTemp1); 46 | masm.ma_subTestOverflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure); 47 | masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg()); 48 | break; 49 | case JSOP_MUL: { 50 | masm.unboxInt32(R0, ExtractTemp0); 51 | masm.unboxInt32(R1, ExtractTemp1); 52 | masm.ma_mul_branch_overflow(scratchReg, ExtractTemp0, ExtractTemp1, &failure); 53 | 54 | masm.ma_b(scratchReg, Imm32(0), &goodMul, Assembler::NotEqual, ShortJump); 55 | 56 | // Result is -0 if operands have different signs. 
57 | masm.as_xor(t8, ExtractTemp0, ExtractTemp1); 58 | masm.ma_b(t8, Imm32(0), &failure, Assembler::LessThan, ShortJump); 59 | 60 | masm.bind(&goodMul); 61 | masm.boxValue(JSVAL_TYPE_INT32, scratchReg, R0.valueReg()); 62 | break; 63 | } 64 | case JSOP_DIV: 65 | case JSOP_MOD: { 66 | masm.unboxInt32(R0, ExtractTemp0); 67 | masm.unboxInt32(R1, ExtractTemp1); 68 | 69 | // divwo will automatically set overflow is INT_MIN/-1 or x/0 is 70 | // performed, so all we need to do is check for negative zero, which 71 | // requires a double. 72 | masm.ma_b(ExtractTemp0, Imm32(0), &divTest2, Assembler::NotEqual, ShortJump); 73 | masm.ma_b(ExtractTemp1, Imm32(0), &failure, Assembler::LessThan, ShortJump); 74 | masm.bind(&divTest2); 75 | 76 | masm.as_divwo(scratchReg, ExtractTemp0, ExtractTemp1); 77 | 78 | // We need to check the remainder to know if the result is not 79 | // integral. If temp * R1 != R0, then there is a remainder. 80 | // We know it can't overflow, so mullw is sufficient. 81 | masm.as_mullw(r0, scratchReg, ExtractTemp1); 82 | 83 | if (op_ == JSOP_DIV) { 84 | // Result is a double if the remainder != 0. 85 | masm.ma_b(r0, ExtractTemp0, &failure, Assembler::NotEqual, ShortJump); 86 | masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); 87 | } else { 88 | Label done; 89 | MOZ_ASSERT(op_ == JSOP_MOD); 90 | 91 | // If X % Y == 0 and X < 0, the result is -0. 
92 | masm.ma_b(r0, Imm32(0), &done, Assembler::NotEqual, ShortJump); 93 | masm.ma_b(ExtractTemp0, Imm32(0), &failure, Assembler::LessThan, ShortJump); 94 | masm.bind(&done); 95 | masm.subf(scratchReg, r0, ExtractTemp0); // T = B - A 96 | masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); 97 | } 98 | break; 99 | } 100 | case JSOP_BITOR: 101 | masm.as_or(R0.valueReg() , R0.valueReg(), R1.valueReg()); 102 | break; 103 | case JSOP_BITXOR: 104 | masm.as_xor(scratchReg, R0.valueReg(), R1.valueReg()); 105 | masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); 106 | break; 107 | case JSOP_BITAND: 108 | masm.as_and(R0.valueReg() , R0.valueReg(), R1.valueReg()); 109 | break; 110 | case JSOP_LSH: 111 | masm.unboxInt32(R0, ExtractTemp0); 112 | masm.unboxInt32(R1, ExtractTemp1); 113 | // Mask to 0x1f, just in case. 114 | masm.as_andi_rc(r0, ExtractTemp1, 0x1f); 115 | masm.as_slw(scratchReg, ExtractTemp0, r0); 116 | masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); 117 | break; 118 | case JSOP_RSH: 119 | masm.unboxInt32(R0, ExtractTemp0); 120 | masm.unboxInt32(R1, ExtractTemp1); 121 | // Mask to 0x1f, just in case. 122 | masm.as_andi_rc(r0, ExtractTemp1, 0x1f); 123 | masm.as_sraw(scratchReg, ExtractTemp0, r0); 124 | masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); 125 | break; 126 | case JSOP_URSH: 127 | masm.unboxInt32(R0, ExtractTemp0); 128 | masm.unboxInt32(R1, ExtractTemp1); 129 | // Mask to 0x1f, just in case. 130 | masm.as_andi_rc(r0, ExtractTemp1, 0x1f); 131 | masm.as_srw(scratchReg, ExtractTemp0, ExtractTemp1); 132 | if (allowDouble_) { 133 | Label toUint; 134 | masm.ma_b(scratchReg, Imm32(0), &toUint, Assembler::LessThan, ShortJump); 135 | 136 | // Move result and box for return. 
137 | masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); 138 | EmitReturnFromIC(masm); 139 | 140 | masm.bind(&toUint); 141 | masm.convertUInt32ToDouble(scratchReg, FloatReg1); 142 | masm.boxDouble(FloatReg1, R0, ScratchDoubleReg); 143 | } else { 144 | masm.ma_b(scratchReg, Imm32(0), &failure, Assembler::LessThan, ShortJump); 145 | // Move result for return. 146 | masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R0); 147 | } 148 | break; 149 | default: 150 | MOZ_CRASH("Unhandled op for BinaryArith_Int32."); 151 | } 152 | 153 | EmitReturnFromIC(masm); 154 | 155 | // Failure case - jump to next stub 156 | masm.bind(&failure); 157 | EmitStubGuardFailure(masm); 158 | 159 | return true; 160 | } 161 | 162 | } // namespace jit 163 | } // namespace js 164 | -------------------------------------------------------------------------------- /SharedICHelpers-mips-shared-inl.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_mips_shared_SharedICHelpers_mips_shared_inl_h 8 | #define jit_mips_shared_SharedICHelpers_mips_shared_inl_h 9 | 10 | #include "jit/SharedICHelpers.h" 11 | 12 | #include "jit/MacroAssembler-inl.h" 13 | 14 | namespace js { 15 | namespace jit { 16 | 17 | inline void 18 | EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t argSize) 19 | { 20 | Register scratch = R2.scratchReg(); 21 | 22 | // Compute frame size. 23 | masm.movePtr(BaselineFrameReg, scratch); 24 | masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch); 25 | masm.subPtr(BaselineStackReg, scratch); 26 | 27 | // Store frame size without VMFunction arguments for GC marking. 
28 | masm.subPtr(Imm32(argSize), scratch); 29 | masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); 30 | masm.addPtr(Imm32(argSize), scratch); 31 | 32 | // Push frame descriptor and perform the tail call. 33 | // ICTailCallReg (ra) already contains the return address (as we 34 | // keep it there through the stub calls), but the VMWrapper code being 35 | // called expects the return address to also be pushed on the stack. 36 | MOZ_ASSERT(ICTailCallReg == ra); 37 | masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size()); 38 | masm.subPtr(Imm32(sizeof(CommonFrameLayout)), StackPointer); 39 | masm.storePtr(scratch, Address(StackPointer, CommonFrameLayout::offsetOfDescriptor())); 40 | masm.storePtr(ra, Address(StackPointer, CommonFrameLayout::offsetOfReturnAddress())); 41 | 42 | masm.jump(target); 43 | } 44 | 45 | inline void 46 | EmitIonTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t stackSize) 47 | { 48 | Register scratch = R2.scratchReg(); 49 | 50 | masm.loadPtr(Address(sp, stackSize), scratch); 51 | masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch); 52 | masm.addPtr(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), scratch); 53 | 54 | // Push frame descriptor and perform the tail call. 55 | MOZ_ASSERT(ICTailCallReg == ra); 56 | masm.makeFrameDescriptor(scratch, JitFrame_IonJS, ExitFrameLayout::Size()); 57 | masm.push(scratch); 58 | masm.push(ICTailCallReg); 59 | masm.jump(target); 60 | } 61 | 62 | inline void 63 | EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize) 64 | { 65 | // Compute stub frame size. We have to add two pointers: the stub reg and 66 | // previous frame pointer pushed by EmitEnterStubFrame. 
67 | masm.movePtr(BaselineFrameReg, reg); 68 | masm.addPtr(Imm32(sizeof(intptr_t) * 2), reg); 69 | masm.subPtr(BaselineStackReg, reg); 70 | 71 | masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize); 72 | } 73 | 74 | inline void 75 | EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) 76 | { 77 | Register scratch = R2.scratchReg(); 78 | EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size()); 79 | masm.push(scratch); 80 | masm.call(target); 81 | } 82 | 83 | inline void 84 | EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) 85 | { 86 | MOZ_ASSERT(scratch != ICTailCallReg); 87 | 88 | // Compute frame size. 89 | masm.movePtr(BaselineFrameReg, scratch); 90 | masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch); 91 | masm.subPtr(BaselineStackReg, scratch); 92 | 93 | masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); 94 | 95 | // Note: when making changes here, don't forget to update 96 | // BaselineStubFrame if needed. 97 | 98 | // Push frame descriptor and return address. 99 | masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size()); 100 | masm.subPtr(Imm32(STUB_FRAME_SIZE), StackPointer); 101 | masm.storePtr(scratch, Address(StackPointer, offsetof(BaselineStubFrame, descriptor))); 102 | masm.storePtr(ICTailCallReg, Address(StackPointer, 103 | offsetof(BaselineStubFrame, returnAddress))); 104 | 105 | // Save old frame pointer, stack pointer and stub reg. 106 | masm.storePtr(ICStubReg, Address(StackPointer, 107 | offsetof(BaselineStubFrame, savedStub))); 108 | masm.storePtr(BaselineFrameReg, Address(StackPointer, 109 | offsetof(BaselineStubFrame, savedFrame))); 110 | masm.movePtr(BaselineStackReg, BaselineFrameReg); 111 | 112 | // Stack should remain aligned. 
113 | masm.assertStackAlignment(sizeof(Value), 0); 114 | } 115 | 116 | } // namespace jit 117 | } // namespace js 118 | 119 | #endif /* jit_mips_shared_SharedICHelpers_mips_shared_inl_h */ 120 | -------------------------------------------------------------------------------- /SharedICHelpers-mips-shared.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_mips_shared_SharedICHelpers_mips_shared_h 8 | #define jit_mips_shared_SharedICHelpers_mips_shared_h 9 | 10 | #include "jit/BaselineFrame.h" 11 | #include "jit/BaselineIC.h" 12 | #include "jit/MacroAssembler.h" 13 | #include "jit/SharedICRegisters.h" 14 | 15 | namespace js { 16 | namespace jit { 17 | 18 | // Distance from sp to the top Value inside an IC stub (no return address on 19 | // the stack on MIPS). 20 | static const size_t ICStackValueOffset = 0; 21 | 22 | struct BaselineStubFrame { 23 | uintptr_t savedFrame; 24 | uintptr_t savedStub; 25 | uintptr_t returnAddress; 26 | uintptr_t descriptor; 27 | }; 28 | 29 | // Size of values pushed by EmitBaselineEnterStubFrame. 30 | static const uint32_t STUB_FRAME_SIZE = sizeof(BaselineStubFrame); 31 | static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = offsetof(BaselineStubFrame, savedStub); 32 | 33 | inline void 34 | EmitRestoreTailCallReg(MacroAssembler& masm) 35 | { 36 | // No-op on MIPS because ra register is always holding the return address. 37 | } 38 | 39 | inline void 40 | EmitRepushTailCallReg(MacroAssembler& masm) 41 | { 42 | // No-op on MIPS because ra register is always holding the return address. 
43 | } 44 | 45 | inline void 46 | EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm) 47 | { 48 | // Move ICEntry offset into ICStubReg. 49 | CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg); 50 | *patchOffset = offset; 51 | 52 | // Load stub pointer into ICStubReg. 53 | masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg); 54 | 55 | // Load stubcode pointer from BaselineStubEntry. 56 | // R2 won't be active when we call ICs, so we can use it as scratch. 57 | masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg()); 58 | 59 | // Call the stubcode via a direct jump-and-link 60 | masm.call(R2.scratchReg()); 61 | } 62 | 63 | inline void 64 | EmitEnterTypeMonitorIC(MacroAssembler& masm, 65 | size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub()) 66 | { 67 | // This is expected to be called from within an IC, when ICStubReg 68 | // is properly initialized to point to the stub. 69 | masm.loadPtr(Address(ICStubReg, (uint32_t) monitorStubOffset), ICStubReg); 70 | 71 | // Load stubcode pointer from BaselineStubEntry. 72 | // R2 won't be active when we call ICs, so we can use it. 73 | masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg()); 74 | 75 | // Jump to the stubcode. 76 | masm.branch(R2.scratchReg()); 77 | } 78 | 79 | inline void 80 | EmitReturnFromIC(MacroAssembler& masm) 81 | { 82 | masm.branch(ra); 83 | } 84 | 85 | inline void 86 | EmitChangeICReturnAddress(MacroAssembler& masm, Register reg) 87 | { 88 | masm.movePtr(reg, ra); 89 | } 90 | 91 | inline void 92 | EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false) 93 | { 94 | // Ion frames do not save and restore the frame pointer. If we called 95 | // into Ion, we have to restore the stack pointer from the frame descriptor. 96 | // If we performed a VM call, the descriptor has been popped already so 97 | // in that case we use the frame pointer. 
98 | if (calledIntoIon) { 99 | masm.pop(ScratchRegister); 100 | masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), ScratchRegister); 101 | masm.addPtr(ScratchRegister, BaselineStackReg); 102 | } else { 103 | masm.movePtr(BaselineFrameReg, BaselineStackReg); 104 | } 105 | 106 | masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedFrame)), 107 | BaselineFrameReg); 108 | masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedStub)), 109 | ICStubReg); 110 | 111 | // Load the return address. 112 | masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, returnAddress)), 113 | ICTailCallReg); 114 | 115 | // Discard the frame descriptor. 116 | masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, descriptor)), ScratchRegister); 117 | masm.addPtr(Imm32(STUB_FRAME_SIZE), StackPointer); 118 | } 119 | 120 | template 121 | inline void 122 | EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type) 123 | { 124 | // On MIPS, $ra is clobbered by guardedCallPreBarrier. Save it first. 125 | masm.push(ra); 126 | masm.guardedCallPreBarrier(addr, type); 127 | masm.pop(ra); 128 | } 129 | 130 | inline void 131 | EmitStubGuardFailure(MacroAssembler& masm) 132 | { 133 | // Load next stub into ICStubReg 134 | masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg); 135 | 136 | // Return address is already loaded, just jump to the next stubcode. 
137 | MOZ_ASSERT(ICTailCallReg == ra); 138 | masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode())); 139 | } 140 | 141 | } // namespace jit 142 | } // namespace js 143 | 144 | #endif /* jit_mips_shared_SharedICHelpers_mips_shared_h */ 145 | -------------------------------------------------------------------------------- /SharedICHelpers-ppc64le-inl.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_ppc64le_SharedICHelpers_ppc64le_inl_h 8 | #define jit_ppc64le_SharedICHelpers_ppc64le_inl_h 9 | 10 | #include "jit/SharedICHelpers.h" 11 | 12 | #include "jit/MacroAssembler-inl.h" 13 | 14 | namespace js { 15 | namespace jit { 16 | 17 | inline void 18 | EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t argSize) 19 | { 20 | Register scratch = R2.scratchReg(); 21 | 22 | // Compute frame size. 23 | masm.movePtr(BaselineFrameReg, scratch); 24 | masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch); 25 | masm.subPtr(BaselineStackReg, scratch); 26 | 27 | // Store frame size without VMFunction arguments for GC marking. 28 | masm.subPtr(Imm32(argSize), scratch); 29 | masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); 30 | masm.addPtr(Imm32(argSize), scratch); 31 | 32 | // Push frame descriptor and perform the tail call. 33 | // ICTailCallReg (LR) already contains the return address (as we 34 | // keep it there through the stub calls), but the VMWrapper code being 35 | // called expects the return address to also be pushed on the stack. 
36 | masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, ExitFrameLayout::Size()); 37 | masm.subPtr(Imm32(sizeof(CommonFrameLayout)), StackPointer); 38 | masm.xs_mflr(ScratchReg); 39 | masm.storePtr(scratch, Address(StackPointer, CommonFrameLayout::offsetOfDescriptor())); 40 | masm.storePtr(ScratchReg, Address(StackPointer, CommonFrameLayout::offsetOfReturnAddress())); 41 | 42 | masm.jump(target); 43 | } 44 | 45 | inline void 46 | EmitIonTailCallVM(TrampolinePtr target, MacroAssembler& masm, uint32_t stackSize) 47 | { 48 | Register scratch = R2.scratchReg(); 49 | 50 | masm.loadPtr(Address(sp, stackSize), scratch); 51 | masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch); 52 | masm.addPtr(Imm32(stackSize + JitStubFrameLayout::Size() - sizeof(intptr_t)), scratch); 53 | 54 | // Push frame descriptor and return address, perform the tail call. 55 | masm.makeFrameDescriptor(scratch, JitFrame_IonJS, ExitFrameLayout::Size()); 56 | masm.xs_mflr(ScratchReg); 57 | masm.push(scratch); 58 | masm.push(ScratchReg); 59 | masm.jump(target); 60 | } 61 | 62 | inline void 63 | EmitBaselineCreateStubFrameDescriptor(MacroAssembler& masm, Register reg, uint32_t headerSize) 64 | { 65 | // Compute stub frame size. We have to add two pointers: the stub reg and 66 | // previous frame pointer pushed by EmitEnterStubFrame. 67 | masm.movePtr(BaselineFrameReg, reg); 68 | masm.addPtr(Imm32(sizeof(intptr_t) * 2), reg); 69 | masm.subPtr(BaselineStackReg, reg); 70 | 71 | masm.makeFrameDescriptor(reg, JitFrame_BaselineStub, headerSize); 72 | } 73 | 74 | inline void 75 | EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) 76 | { 77 | Register scratch = R2.scratchReg(); 78 | EmitBaselineCreateStubFrameDescriptor(masm, scratch, ExitFrameLayout::Size()); 79 | masm.push(scratch); 80 | masm.call(target); 81 | } 82 | 83 | inline void 84 | EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) 85 | { 86 | // Compute frame size. 
87 | masm.movePtr(BaselineFrameReg, scratch); 88 | masm.addPtr(Imm32(BaselineFrame::FramePointerOffset), scratch); 89 | masm.subPtr(BaselineStackReg, scratch); 90 | 91 | masm.store32(scratch, Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFrameSize())); 92 | 93 | // Note: when making changes here, don't forget to update 94 | // BaselineStubFrame if needed. 95 | 96 | // Push frame descriptor and return address. 97 | masm.makeFrameDescriptor(scratch, JitFrame_BaselineJS, BaselineStubFrameLayout::Size()); 98 | masm.subPtr(Imm32(STUB_FRAME_SIZE), StackPointer); 99 | masm.xs_mflr(ScratchReg); 100 | masm.storePtr(scratch, Address(StackPointer, offsetof(BaselineStubFrame, descriptor))); 101 | masm.storePtr(ScratchReg, Address(StackPointer, 102 | offsetof(BaselineStubFrame, returnAddress))); 103 | 104 | // Save old frame pointer, stack pointer and stub reg. 105 | masm.storePtr(ICStubReg, Address(StackPointer, 106 | offsetof(BaselineStubFrame, savedStub))); 107 | masm.storePtr(BaselineFrameReg, Address(StackPointer, 108 | offsetof(BaselineStubFrame, savedFrame))); 109 | masm.movePtr(BaselineStackReg, BaselineFrameReg); 110 | 111 | // Stack should remain aligned. 112 | masm.assertStackAlignment(sizeof(Value), 0); 113 | } 114 | 115 | } // namespace jit 116 | } // namespace js 117 | 118 | #endif /* jit_ppc64le_SharedICHelpers_ppc64le_inl_h */ 119 | -------------------------------------------------------------------------------- /SharedICHelpers-ppc64le.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 6 | 7 | #ifndef jit_ppc64le_SharedICHelpers_ppc64le_h 8 | #define jit_ppc64le_SharedICHelpers_ppc64le_h 9 | 10 | #include "jit/BaselineFrame.h" 11 | #include "jit/BaselineIC.h" 12 | #include "jit/MacroAssembler.h" 13 | #include "jit/SharedICRegisters.h" 14 | 15 | namespace js { 16 | namespace jit { 17 | 18 | // Distance from sp to the top Value inside an IC stub (no return address on 19 | // the stack on Power ISA). 20 | static const size_t ICStackValueOffset = 0; 21 | 22 | struct BaselineStubFrame { 23 | uintptr_t savedFrame; 24 | uintptr_t savedStub; 25 | uintptr_t returnAddress; 26 | uintptr_t descriptor; 27 | }; 28 | 29 | // Size of values pushed by EmitBaselineEnterStubFrame. 30 | static const uint32_t STUB_FRAME_SIZE = sizeof(BaselineStubFrame); 31 | static const uint32_t STUB_FRAME_SAVED_STUB_OFFSET = offsetof(BaselineStubFrame, savedStub); 32 | 33 | inline void 34 | EmitRestoreTailCallReg(MacroAssembler& masm) 35 | { 36 | // No-op; LR is always the return address. 37 | } 38 | 39 | inline void 40 | EmitRepushTailCallReg(MacroAssembler& masm) 41 | { 42 | // No-op the second; LR is always the return address. 43 | } 44 | 45 | inline void 46 | EmitCallIC(CodeOffset* patchOffset, MacroAssembler& masm) 47 | { 48 | // Move ICEntry offset into ICStubReg. 49 | CodeOffset offset = masm.movWithPatch(ImmWord(-1), ICStubReg); 50 | *patchOffset = offset; 51 | 52 | // Load stub pointer into ICStubReg. 53 | masm.loadPtr(Address(ICStubReg, ICEntry::offsetOfFirstStub()), ICStubReg); 54 | 55 | // Load stubcode pointer from BaselineStubEntry. 56 | // R2 won't be active when we call ICs, so we can use it as scratch. 
57 | masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg()); 58 | 59 | // Call the stubcode via a direct jump-and-link 60 | masm.call(R2.scratchReg()); 61 | } 62 | 63 | inline void 64 | EmitEnterTypeMonitorIC(MacroAssembler& masm, 65 | size_t monitorStubOffset = ICMonitoredStub::offsetOfFirstMonitorStub()) 66 | { 67 | // This is expected to be called from within an IC, when ICStubReg 68 | // is properly initialized to point to the stub. 69 | masm.loadPtr(Address(ICStubReg, (uint32_t) monitorStubOffset), ICStubReg); 70 | 71 | // Load stubcode pointer from BaselineStubEntry. 72 | // R2 won't be active when we call ICs, so we can use it. 73 | masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg()); 74 | 75 | // Jump to the stubcode. 76 | masm.branch(R2.scratchReg()); 77 | } 78 | 79 | inline void 80 | EmitReturnFromIC(MacroAssembler& masm) 81 | { 82 | masm.as_blr(); 83 | } 84 | 85 | inline void 86 | EmitChangeICReturnAddress(MacroAssembler& masm, Register reg) 87 | { 88 | masm.xs_mtlr(reg); 89 | } 90 | 91 | inline void 92 | EmitBaselineLeaveStubFrame(MacroAssembler& masm, bool calledIntoIon = false) 93 | { 94 | // Ion frames do not save and restore the frame pointer. If we called 95 | // into Ion, we have to restore the stack pointer from the frame descriptor. 96 | // If we performed a VM call, the descriptor has been popped already so 97 | // in that case we use the frame pointer. 98 | if (calledIntoIon) { 99 | masm.pop(ScratchRegister); 100 | masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), ScratchRegister); 101 | masm.addPtr(ScratchRegister, BaselineStackReg); 102 | } else { 103 | masm.movePtr(BaselineFrameReg, BaselineStackReg); 104 | } 105 | 106 | masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedFrame)), 107 | BaselineFrameReg); 108 | masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, savedStub)), 109 | ICStubReg); 110 | 111 | // Load the return address. 
112 | masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, returnAddress)), 113 | ICTailCallReg); 114 | 115 | // Discard the frame descriptor. 116 | masm.loadPtr(Address(StackPointer, offsetof(BaselineStubFrame, descriptor)), ScratchRegister); 117 | masm.addPtr(Imm32(STUB_FRAME_SIZE), StackPointer); 118 | } 119 | 120 | template 121 | inline void 122 | EmitPreBarrier(MacroAssembler& masm, const AddrType& addr, MIRType type) 123 | { 124 | // Calls made in the prebarrier may clobber LR, so save it first. 125 | masm.xs_mflr(ScratchReg); 126 | masm.push(ScratchReg); 127 | masm.guardedCallPreBarrier(addr, type); 128 | masm.pop(ScratchReg); 129 | masm.xs_mtlr(ScratchReg); 130 | } 131 | 132 | inline void 133 | EmitStubGuardFailure(MacroAssembler& masm) 134 | { 135 | // Load next stub into ICStubReg 136 | masm.loadPtr(Address(ICStubReg, ICStub::offsetOfNext()), ICStubReg); 137 | 138 | // Return address is already loaded, just jump to the next stubcode. 139 | masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode())); 140 | } 141 | 142 | } // namespace jit 143 | } // namespace js 144 | 145 | #endif /* jit_ppc64le_SharedICHelpers_ppc64le_h */ 146 | -------------------------------------------------------------------------------- /SharedICRegisters-mips64.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ 6 | 7 | #ifndef jit_mips64_SharedICRegisters_mips64_h 8 | #define jit_mips64_SharedICRegisters_mips64_h 9 | 10 | #include "jit/MacroAssembler.h" 11 | 12 | namespace js { 13 | namespace jit { 14 | 15 | static constexpr Register BaselineFrameReg = s5; 16 | static constexpr Register BaselineStackReg = sp; 17 | 18 | // ValueOperands R0, R1, and R2. 19 | // R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1 value 20 | // should be preserved across calls. 21 | static constexpr ValueOperand R0(v1); 22 | static constexpr ValueOperand R1(s4); 23 | static constexpr ValueOperand R2(a6); 24 | 25 | // ICTailCallReg and ICStubReg 26 | // These use registers that are not preserved across calls. 27 | static constexpr Register ICTailCallReg = ra; 28 | static constexpr Register ICStubReg = a5; 29 | 30 | static constexpr Register ExtractTemp0 = s6; 31 | static constexpr Register ExtractTemp1 = s7; 32 | 33 | // Register used internally by MacroAssemblerMIPS. 34 | static constexpr Register BaselineSecondScratchReg = SecondScratchReg; 35 | 36 | // Note that ICTailCallReg is actually just the link register. 37 | // In MIPS code emission, we do not clobber ICTailCallReg since we keep 38 | // the return address for calls there. 39 | 40 | // FloatReg0 must be equal to ReturnFloatReg. 41 | static constexpr FloatRegister FloatReg0 = f0; 42 | static constexpr FloatRegister FloatReg1 = f2; 43 | 44 | } // namespace jit 45 | } // namespace js 46 | 47 | #endif /* jit_mips64_SharedICRegisters_mips64_h */ 48 | -------------------------------------------------------------------------------- /SharedICRegisters-ppc64le.h: -------------------------------------------------------------------------------- 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: 3 | * This Source Code Form is subject to the terms of the Mozilla Public 4 | * License, v. 2.0. 
If a copy of the MPL was not distributed with this 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 6 | 7 | #ifndef jit_ppc64le_SharedICRegisters_ppc64le_h 8 | #define jit_ppc64le_SharedICRegisters_ppc64le_h 9 | 10 | #include "jit/MacroAssembler.h" 11 | 12 | namespace js { 13 | namespace jit { 14 | 15 | // The frame register should be allocatable but non-volatile. 16 | static constexpr Register BaselineFrameReg = r13; 17 | // This is just an alias for the stack pointer currently. 18 | static constexpr Register BaselineStackReg = r1; 19 | 20 | // ValueOperands R0, R1, and R2. 21 | // R0 == JSReturnReg, and R2 uses registers not preserved across calls. R1 value 22 | // should be preserved across calls. 23 | static constexpr ValueOperand R0(r4); 24 | static constexpr ValueOperand R1(r15); // non-volatile 25 | static constexpr ValueOperand R2(r5); 26 | 27 | // ICTailCallReg and ICStubReg 28 | // These use registers that are not preserved across calls. 29 | // The tail call register situation is rather weird on Power: LR is an SPR, not 30 | // a GPR. We have to do some manual patching in the JIT to deal with this issue 31 | // since it assumes it can just use the tail call register like any other 32 | // register. The invalid value is just a dummy to put something here. 33 | #error fix JIT to deal with ICTailCallReg in jit and shared 34 | static constexpr Register ICTailCallReg = InvalidReg; 35 | static constexpr Register ICStubReg = r7; 36 | 37 | static constexpr Register ExtractTemp0 = InvalidReg; 38 | static constexpr Register ExtractTemp1 = InvalidReg; 39 | 40 | // Register used internally by the Power Macro Assembler. 41 | static constexpr Register BaselineSecondScratchReg = SecondScratchReg; 42 | 43 | // FloatReg0 must be equal to ReturnFloatReg. 
44 | static constexpr FloatRegister FloatReg0 = f1; 45 | static constexpr FloatRegister FloatReg1 = f2; 46 | 47 | } // namespace jit 48 | } // namespace js 49 | 50 | #endif /* jit_ppc64le_SharedICRegisters_ppc64le_h */ 51 | --------------------------------------------------------------------------------