1/*
2 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef DFGSpeculativeJIT_h
27#define DFGSpeculativeJIT_h
28
29#if ENABLE(DFG_JIT)
30
31#include "DFGAbstractInterpreter.h"
32#include "DFGGenerationInfo.h"
33#include "DFGInPlaceAbstractState.h"
34#include "DFGJITCompiler.h"
35#include "DFGOSRExit.h"
36#include "DFGOSRExitJumpPlaceholder.h"
37#include "DFGSilentRegisterSavePlan.h"
38#include "DFGValueSource.h"
39#include "JITOperations.h"
40#include "MarkedAllocator.h"
41#include "PutKind.h"
42#include "ValueRecovery.h"
43#include "VirtualRegister.h"
44
45namespace JSC { namespace DFG {
46
47class GPRTemporary;
48class JSValueOperand;
49class SlowPathGenerator;
50class SpeculativeJIT;
51class SpeculateInt32Operand;
52class SpeculateStrictInt32Operand;
53class SpeculateDoubleOperand;
54class SpeculateCellOperand;
55class SpeculateBooleanOperand;
56
57enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandJSValue};
58
59inline GPRReg extractResult(GPRReg result) { return result; }
60#if USE(JSVALUE64)
61inline GPRReg extractResult(JSValueRegs result) { return result.gpr(); }
62#else
63inline JSValueRegs extractResult(JSValueRegs result) { return result; }
64#endif
65inline NoResultTag extractResult(NoResultTag) { return NoResult; }
66
67// === SpeculativeJIT ===
68//
69// The SpeculativeJIT is used to generate a fast, but potentially
70// incomplete code path for the dataflow. When code generating
71// we may make assumptions about operand types, dynamically check,
72// and bail-out to an alternate code path if these checks fail.
73// Importantly, the speculative code path cannot be reentered once
74// a speculative check has failed. This allows the SpeculativeJIT
75// to propagate type information (including information that has
76// only speculatively been asserted) through the dataflow.
77class SpeculativeJIT {
78    WTF_MAKE_FAST_ALLOCATED;
79
80    friend struct OSRExit;
81private:
82    typedef JITCompiler::TrustedImm32 TrustedImm32;
83    typedef JITCompiler::Imm32 Imm32;
84    typedef JITCompiler::TrustedImmPtr TrustedImmPtr;
85    typedef JITCompiler::ImmPtr ImmPtr;
86    typedef JITCompiler::TrustedImm64 TrustedImm64;
87    typedef JITCompiler::Imm64 Imm64;
88
89    // These constants are used to set priorities for spill order for
90    // the register allocator.
91#if USE(JSVALUE64)
92    enum SpillOrder {
93        SpillOrderConstant = 1, // no spill, and cheap fill
94        SpillOrderSpilled  = 2, // no spill
95        SpillOrderJS       = 4, // needs spill
96        SpillOrderCell     = 4, // needs spill
97        SpillOrderStorage  = 4, // needs spill
98        SpillOrderInteger  = 5, // needs spill and box
99        SpillOrderBoolean  = 5, // needs spill and box
100        SpillOrderDouble   = 6, // needs spill and convert
101    };
102#elif USE(JSVALUE32_64)
103    enum SpillOrder {
104        SpillOrderConstant = 1, // no spill, and cheap fill
105        SpillOrderSpilled  = 2, // no spill
106        SpillOrderJS       = 4, // needs spill
107        SpillOrderStorage  = 4, // needs spill
108        SpillOrderDouble   = 4, // needs spill
109        SpillOrderInteger  = 5, // needs spill and box
110        SpillOrderCell     = 5, // needs spill and box
111        SpillOrderBoolean  = 5, // needs spill and box
112    };
113#endif
114
115    enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };
116
117public:
118    SpeculativeJIT(JITCompiler&);
119    ~SpeculativeJIT();
120
121    bool compile();
122
123    void createOSREntries();
124    void linkOSREntries(LinkBuffer&);
125
126    BasicBlock* nextBlock()
127    {
128        for (BlockIndex resultIndex = m_block->index + 1; ; resultIndex++) {
129            if (resultIndex >= m_jit.graph().numBlocks())
130                return 0;
131            if (BasicBlock* result = m_jit.graph().block(resultIndex))
132                return result;
133        }
134    }
135
136#if USE(JSVALUE64)
137    GPRReg fillJSValue(Edge);
138#elif USE(JSVALUE32_64)
139    bool fillJSValue(Edge, GPRReg&, GPRReg&, FPRReg&);
140#endif
141    GPRReg fillStorage(Edge);
142
143    // lock and unlock GPR & FPR registers.
144    void lock(GPRReg reg)
145    {
146        m_gprs.lock(reg);
147    }
148    void lock(FPRReg reg)
149    {
150        m_fprs.lock(reg);
151    }
152    void unlock(GPRReg reg)
153    {
154        m_gprs.unlock(reg);
155    }
156    void unlock(FPRReg reg)
157    {
158        m_fprs.unlock(reg);
159    }
160
161    // Used to check whether a child node is on its last use,
162    // and its machine registers may be reused.
163    bool canReuse(Node* node)
164    {
165        return generationInfo(node).canReuse();
166    }
167    bool canReuse(Edge nodeUse)
168    {
169        return canReuse(nodeUse.node());
170    }
171    GPRReg reuse(GPRReg reg)
172    {
173        m_gprs.lock(reg);
174        return reg;
175    }
176    FPRReg reuse(FPRReg reg)
177    {
178        m_fprs.lock(reg);
179        return reg;
180    }
181
182    // Allocate a gpr/fpr.
183    GPRReg allocate()
184    {
185#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
186        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
187#endif
188        VirtualRegister spillMe;
189        GPRReg gpr = m_gprs.allocate(spillMe);
190        if (spillMe.isValid()) {
191#if USE(JSVALUE32_64)
192            GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
193            if ((info.registerFormat() & DataFormatJS))
194                m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
195#endif
196            spill(spillMe);
197        }
198        return gpr;
199    }
200    GPRReg allocate(GPRReg specific)
201    {
202#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
203        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
204#endif
205        VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
206        if (spillMe.isValid()) {
207#if USE(JSVALUE32_64)
208            GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
209            RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
210            if ((info.registerFormat() & DataFormatJS))
211                m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
212#endif
213            spill(spillMe);
214        }
215        return specific;
216    }
217    GPRReg tryAllocate()
218    {
219        return m_gprs.tryAllocate();
220    }
221    FPRReg fprAllocate()
222    {
223#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
224        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
225#endif
226        VirtualRegister spillMe;
227        FPRReg fpr = m_fprs.allocate(spillMe);
228        if (spillMe.isValid())
229            spill(spillMe);
230        return fpr;
231    }
232
233    // Check whether a VirtualRegsiter is currently in a machine register.
234    // We use this when filling operands to fill those that are already in
235    // machine registers first (by locking VirtualRegsiters that are already
236    // in machine register before filling those that are not we attempt to
237    // avoid spilling values we will need immediately).
238    bool isFilled(Node* node)
239    {
240        return generationInfo(node).registerFormat() != DataFormatNone;
241    }
242    bool isFilledDouble(Node* node)
243    {
244        return generationInfo(node).registerFormat() == DataFormatDouble;
245    }
246
247    // Called on an operand once it has been consumed by a parent node.
248    void use(Node* node)
249    {
250        if (!node->hasResult())
251            return;
252        GenerationInfo& info = generationInfo(node);
253
254        // use() returns true when the value becomes dead, and any
255        // associated resources may be freed.
256        if (!info.use(*m_stream))
257            return;
258
259        // Release the associated machine registers.
260        DataFormat registerFormat = info.registerFormat();
261#if USE(JSVALUE64)
262        if (registerFormat == DataFormatDouble)
263            m_fprs.release(info.fpr());
264        else if (registerFormat != DataFormatNone)
265            m_gprs.release(info.gpr());
266#elif USE(JSVALUE32_64)
267        if (registerFormat == DataFormatDouble)
268            m_fprs.release(info.fpr());
269        else if (registerFormat & DataFormatJS) {
270            m_gprs.release(info.tagGPR());
271            m_gprs.release(info.payloadGPR());
272        } else if (registerFormat != DataFormatNone)
273            m_gprs.release(info.gpr());
274#endif
275    }
276    void use(Edge nodeUse)
277    {
278        use(nodeUse.node());
279    }
280
281    RegisterSet usedRegisters();
282
283    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
284    {
285        return m_jit.graph().masqueradesAsUndefinedWatchpointIsStillValid(codeOrigin);
286    }
287    bool masqueradesAsUndefinedWatchpointIsStillValid()
288    {
289        return masqueradesAsUndefinedWatchpointIsStillValid(m_currentNode->origin.semantic);
290    }
291
292#if ENABLE(GGC)
293    void storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2);
294    void storeToWriteBarrierBuffer(JSCell*, GPRReg scratch1, GPRReg scratch2);
295
296    void writeBarrier(GPRReg owner, GPRReg scratch1, GPRReg scratch2);
297    void writeBarrier(GPRReg owner, JSCell* value, GPRReg scratch1, GPRReg scratch2);
298
299    void writeBarrier(GPRReg owner, GPRReg value, Edge valueUse, GPRReg scratch1, GPRReg scratch2);
300    void writeBarrier(JSCell* owner, GPRReg value, Edge valueUse, GPRReg scratch1, GPRReg scratch2);
301#endif
302    void compileStoreBarrier(Node*);
303
304    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
305    {
306        return AssemblyHelpers::selectScratchGPR(preserve1, preserve2, preserve3, preserve4);
307    }
308
309    // Called by the speculative operand types, below, to fill operand to
310    // machine registers, implicitly generating speculation checks as needed.
311    GPRReg fillSpeculateInt32(Edge, DataFormat& returnFormat);
312    GPRReg fillSpeculateInt32Strict(Edge);
313    GPRReg fillSpeculateInt52(Edge, DataFormat desiredFormat);
314    FPRReg fillSpeculateDouble(Edge);
315    GPRReg fillSpeculateCell(Edge);
316    GPRReg fillSpeculateBoolean(Edge);
317    GeneratedOperandType checkGeneratedTypeForToInt32(Node*);
318
319    void addSlowPathGenerator(PassOwnPtr<SlowPathGenerator>);
320    void runSlowPathGenerators();
321
322    void compile(Node*);
323    void noticeOSRBirth(Node*);
324    void bail(AbortReason);
325    void compileCurrentBlock();
326
327    void checkArgumentTypes();
328
329    void clearGenerationInfo();
330
331    // These methods are used when generating 'unexpected'
332    // calls out from JIT code to C++ helper routines -
333    // they spill all live values to the appropriate
334    // slots in the JSStack without changing any state
335    // in the GenerationInfo.
336    SilentRegisterSavePlan silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source);
337    SilentRegisterSavePlan silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source);
338    void silentSpill(const SilentRegisterSavePlan&);
339    void silentFill(const SilentRegisterSavePlan&, GPRReg canTrample);
340
341    template<typename CollectionType>
342    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
343    {
344        ASSERT(plans.isEmpty());
345        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
346            GPRReg gpr = iter.regID();
347            if (iter.name().isValid() && gpr != exclude && gpr != exclude2) {
348                SilentRegisterSavePlan plan = silentSavePlanForGPR(iter.name(), gpr);
349                if (doSpill)
350                    silentSpill(plan);
351                plans.append(plan);
352            }
353        }
354        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
355            if (iter.name().isValid() && iter.regID() != fprExclude) {
356                SilentRegisterSavePlan plan = silentSavePlanForFPR(iter.name(), iter.regID());
357                if (doSpill)
358                    silentSpill(plan);
359                plans.append(plan);
360            }
361        }
362    }
363    template<typename CollectionType>
364    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, NoResultTag)
365    {
366        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, InvalidFPRReg);
367    }
368    template<typename CollectionType>
369    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, FPRReg exclude)
370    {
371        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, exclude);
372    }
373#if USE(JSVALUE32_64)
374    template<typename CollectionType>
375    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, JSValueRegs exclude)
376    {
377        silentSpillAllRegistersImpl(doSpill, plans, exclude.tagGPR(), exclude.payloadGPR());
378    }
379#endif
380
381    void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
382    {
383        silentSpillAllRegistersImpl(true, m_plans, exclude, exclude2, fprExclude);
384    }
385    void silentSpillAllRegisters(FPRReg exclude)
386    {
387        silentSpillAllRegisters(InvalidGPRReg, InvalidGPRReg, exclude);
388    }
389
390    static GPRReg pickCanTrample(GPRReg exclude)
391    {
392        GPRReg result = GPRInfo::regT0;
393        if (result == exclude)
394            result = GPRInfo::regT1;
395        return result;
396    }
397    static GPRReg pickCanTrample(FPRReg)
398    {
399        return GPRInfo::regT0;
400    }
401    static GPRReg pickCanTrample(NoResultTag)
402    {
403        return GPRInfo::regT0;
404    }
405
406#if USE(JSVALUE32_64)
407    static GPRReg pickCanTrample(JSValueRegs exclude)
408    {
409        GPRReg result = GPRInfo::regT0;
410        if (result == exclude.tagGPR()) {
411            result = GPRInfo::regT1;
412            if (result == exclude.payloadGPR())
413                result = GPRInfo::regT2;
414        } else if (result == exclude.payloadGPR()) {
415            result = GPRInfo::regT1;
416            if (result == exclude.tagGPR())
417                result = GPRInfo::regT2;
418        }
419        return result;
420    }
421#endif
422
423    template<typename RegisterType>
424    void silentFillAllRegisters(RegisterType exclude)
425    {
426        GPRReg canTrample = pickCanTrample(exclude);
427
428        while (!m_plans.isEmpty()) {
429            SilentRegisterSavePlan& plan = m_plans.last();
430            silentFill(plan, canTrample);
431            m_plans.removeLast();
432        }
433    }
434
435    // These methods convert between doubles, and doubles boxed and JSValues.
436#if USE(JSVALUE64)
437    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
438    {
439        return m_jit.boxDouble(fpr, gpr);
440    }
441    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
442    {
443        return m_jit.unboxDouble(gpr, fpr);
444    }
445    GPRReg boxDouble(FPRReg fpr)
446    {
447        return boxDouble(fpr, allocate());
448    }
449
450    void boxInt52(GPRReg sourceGPR, GPRReg targetGPR, DataFormat);
451#elif USE(JSVALUE32_64)
452    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
453    {
454        m_jit.boxDouble(fpr, tagGPR, payloadGPR);
455    }
456    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
457    {
458        m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
459    }
460#endif
461    void boxDouble(FPRReg fpr, JSValueRegs regs)
462    {
463        m_jit.boxDouble(fpr, regs);
464    }
465
466    // Spill a VirtualRegister to the JSStack.
467    void spill(VirtualRegister spillMe)
468    {
469        GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
470
471#if USE(JSVALUE32_64)
472        if (info.registerFormat() == DataFormatNone) // it has been spilled. JS values which have two GPRs can reach here
473            return;
474#endif
475        // Check the GenerationInfo to see if this value need writing
476        // to the JSStack - if not, mark it as spilled & return.
477        if (!info.needsSpill()) {
478            info.setSpilled(*m_stream, spillMe);
479            return;
480        }
481
482        DataFormat spillFormat = info.registerFormat();
483        switch (spillFormat) {
484        case DataFormatStorage: {
485            // This is special, since it's not a JS value - as in it's not visible to JS
486            // code.
487            m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
488            info.spill(*m_stream, spillMe, DataFormatStorage);
489            return;
490        }
491
492        case DataFormatInt32: {
493            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
494            info.spill(*m_stream, spillMe, DataFormatInt32);
495            return;
496        }
497
498#if USE(JSVALUE64)
499        case DataFormatDouble: {
500            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
501            info.spill(*m_stream, spillMe, DataFormatDouble);
502            return;
503        }
504
505        case DataFormatInt52:
506        case DataFormatStrictInt52: {
507            m_jit.store64(info.gpr(), JITCompiler::addressFor(spillMe));
508            info.spill(*m_stream, spillMe, spillFormat);
509            return;
510        }
511
512        default:
513            // The following code handles JSValues, int32s, and cells.
514            RELEASE_ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);
515
516            GPRReg reg = info.gpr();
517            // We need to box int32 and cell values ...
518            // but on JSVALUE64 boxing a cell is a no-op!
519            if (spillFormat == DataFormatInt32)
520                m_jit.or64(GPRInfo::tagTypeNumberRegister, reg);
521
522            // Spill the value, and record it as spilled in its boxed form.
523            m_jit.store64(reg, JITCompiler::addressFor(spillMe));
524            info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS));
525            return;
526#elif USE(JSVALUE32_64)
527        case DataFormatCell:
528        case DataFormatBoolean: {
529            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
530            info.spill(*m_stream, spillMe, spillFormat);
531            return;
532        }
533
534        case DataFormatDouble: {
535            // On JSVALUE32_64 boxing a double is a no-op.
536            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
537            info.spill(*m_stream, spillMe, DataFormatDouble);
538            return;
539        }
540
541        default:
542            // The following code handles JSValues.
543            RELEASE_ASSERT(spillFormat & DataFormatJS);
544            m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
545            m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
546            info.spill(*m_stream, spillMe, spillFormat);
547            return;
548#endif
549        }
550    }
551
552    bool isKnownInteger(Node* node) { return m_state.forNode(node).isType(SpecInt32); }
553    bool isKnownCell(Node* node) { return m_state.forNode(node).isType(SpecCell); }
554
555    bool isKnownNotInteger(Node* node) { return !(m_state.forNode(node).m_type & SpecInt32); }
556    bool isKnownNotNumber(Node* node) { return !(m_state.forNode(node).m_type & SpecFullNumber); }
557    bool isKnownNotCell(Node* node) { return !(m_state.forNode(node).m_type & SpecCell); }
558
559    // Checks/accessors for constant values.
560    bool isConstant(Node* node) { return m_jit.graph().isConstant(node); }
561    bool isJSConstant(Node* node) { return m_jit.graph().isJSConstant(node); }
562    bool isInt32Constant(Node* node) { return m_jit.graph().isInt32Constant(node); }
563    bool isDoubleConstant(Node* node) { return m_jit.graph().isDoubleConstant(node); }
564    bool isNumberConstant(Node* node) { return m_jit.graph().isNumberConstant(node); }
565    bool isBooleanConstant(Node* node) { return m_jit.graph().isBooleanConstant(node); }
566    bool isFunctionConstant(Node* node) { return m_jit.graph().isFunctionConstant(node); }
567    int32_t valueOfInt32Constant(Node* node) { return m_jit.graph().valueOfInt32Constant(node); }
568    double valueOfNumberConstant(Node* node) { return m_jit.graph().valueOfNumberConstant(node); }
569#if USE(JSVALUE32_64)
570    void* addressOfDoubleConstant(Node* node) { return m_jit.addressOfDoubleConstant(node); }
571#endif
572    JSValue valueOfJSConstant(Node* node) { return m_jit.graph().valueOfJSConstant(node); }
573    bool valueOfBooleanConstant(Node* node) { return m_jit.graph().valueOfBooleanConstant(node); }
574    JSFunction* valueOfFunctionConstant(Node* node) { return m_jit.graph().valueOfFunctionConstant(node); }
575    bool isNullConstant(Node* node)
576    {
577        if (!isConstant(node))
578            return false;
579        return valueOfJSConstant(node).isNull();
580    }
581
582    StringImpl* identifierUID(unsigned index)
583    {
584        return m_jit.graph().identifiers()[index];
585    }
586
587    // Spill all VirtualRegisters back to the JSStack.
588    void flushRegisters()
589    {
590        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
591            if (iter.name().isValid()) {
592                spill(iter.name());
593                iter.release();
594            }
595        }
596        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
597            if (iter.name().isValid()) {
598                spill(iter.name());
599                iter.release();
600            }
601        }
602    }
603
604#ifndef NDEBUG
605    // Used to ASSERT flushRegisters() has been called prior to
606    // calling out from JIT code to a C helper function.
607    bool isFlushed()
608    {
609        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
610            if (iter.name().isValid())
611                return false;
612        }
613        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
614            if (iter.name().isValid())
615                return false;
616        }
617        return true;
618    }
619#endif
620
621#if USE(JSVALUE64)
622    MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node)
623    {
624        return MacroAssembler::Imm64(JSValue::encode(valueOfJSConstant(node)));
625    }
626#endif
627
628    // Helper functions to enable code sharing in implementations of bit/shift ops.
629    void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
630    {
631        switch (op) {
632        case BitAnd:
633            m_jit.and32(Imm32(imm), op1, result);
634            break;
635        case BitOr:
636            m_jit.or32(Imm32(imm), op1, result);
637            break;
638        case BitXor:
639            m_jit.xor32(Imm32(imm), op1, result);
640            break;
641        default:
642            RELEASE_ASSERT_NOT_REACHED();
643        }
644    }
645    void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
646    {
647        switch (op) {
648        case BitAnd:
649            m_jit.and32(op1, op2, result);
650            break;
651        case BitOr:
652            m_jit.or32(op1, op2, result);
653            break;
654        case BitXor:
655            m_jit.xor32(op1, op2, result);
656            break;
657        default:
658            RELEASE_ASSERT_NOT_REACHED();
659        }
660    }
661    void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
662    {
663        switch (op) {
664        case BitRShift:
665            m_jit.rshift32(op1, Imm32(shiftAmount), result);
666            break;
667        case BitLShift:
668            m_jit.lshift32(op1, Imm32(shiftAmount), result);
669            break;
670        case BitURShift:
671            m_jit.urshift32(op1, Imm32(shiftAmount), result);
672            break;
673        default:
674            RELEASE_ASSERT_NOT_REACHED();
675        }
676    }
677    void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
678    {
679        switch (op) {
680        case BitRShift:
681            m_jit.rshift32(op1, shiftAmount, result);
682            break;
683        case BitLShift:
684            m_jit.lshift32(op1, shiftAmount, result);
685            break;
686        case BitURShift:
687            m_jit.urshift32(op1, shiftAmount, result);
688            break;
689        default:
690            RELEASE_ASSERT_NOT_REACHED();
691        }
692    }
693
694    // Returns the index of the branch node if peephole is okay, UINT_MAX otherwise.
695    unsigned detectPeepHoleBranch()
696    {
697        // Check that no intervening nodes will be generated.
698        for (unsigned index = m_indexInBlock + 1; index < m_block->size() - 1; ++index) {
699            Node* node = m_block->at(index);
700            if (!node->shouldGenerate())
701                continue;
702            // Check if it's a Phantom that can be safely ignored.
703            if (node->op() == Phantom && !node->child1())
704                continue;
705            return UINT_MAX;
706        }
707
708        // Check if the lastNode is a branch on this node.
709        Node* lastNode = m_block->last();
710        return lastNode->op() == Branch && lastNode->child1() == m_currentNode ? m_block->size() - 1 : UINT_MAX;
711    }
712
713    void compileMovHint(Node*);
714    void compileMovHintAndCheck(Node*);
715
716#if USE(JSVALUE64)
717    void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
718    void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
719#elif USE(JSVALUE32_64)
720    void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
721    void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
722#endif
723
724    void compileIn(Node*);
725
726    void compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge);
727
728    void nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert = false);
729    void nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert = false);
730    bool nonSpeculativeCompareNull(Node*, Edge operand, bool invert = false);
731
732    void nonSpeculativePeepholeBranch(Node*, Node* branchNode, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction);
733    void nonSpeculativeNonPeepholeCompare(Node*, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction);
734    bool nonSpeculativeCompare(Node*, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction);
735
736    void nonSpeculativePeepholeStrictEq(Node*, Node* branchNode, bool invert = false);
737    void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false);
738    bool nonSpeculativeStrictEq(Node*, bool invert = false);
739
740    void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg, GPRReg scratch2Reg);
741    void compileInstanceOf(Node*);
742
743    ptrdiff_t calleeFrameOffset(int numArgs)
744    {
745        return virtualRegisterForLocal(m_jit.graph().m_nextMachineLocal - 1 + JSStack::CallFrameHeaderSize + numArgs).offset() * sizeof(Register);
746    }
747
748    // Access to our fixed callee CallFrame.
749    MacroAssembler::Address calleeFrameSlot(int slot)
750    {
751        ASSERT(slot >= JSStack::CallerFrameAndPCSize);
752        return MacroAssembler::Address(MacroAssembler::stackPointerRegister, sizeof(Register) * (slot - JSStack::CallerFrameAndPCSize));
753    }
754
755    // Access to our fixed callee CallFrame.
756    MacroAssembler::Address calleeArgumentSlot(int argument)
757    {
758        return calleeFrameSlot(virtualRegisterForArgument(argument).offset());
759    }
760
761    MacroAssembler::Address calleeFrameTagSlot(int slot)
762    {
763        return calleeFrameSlot(slot).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
764    }
765
766    MacroAssembler::Address calleeFramePayloadSlot(int slot)
767    {
768        return calleeFrameSlot(slot).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
769    }
770
771    MacroAssembler::Address calleeArgumentTagSlot(int argument)
772    {
773        return calleeArgumentSlot(argument).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
774    }
775
776    MacroAssembler::Address calleeArgumentPayloadSlot(int argument)
777    {
778        return calleeArgumentSlot(argument).withOffset(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
779    }
780
781    MacroAssembler::Address calleeFrameCallerFrame()
782    {
783        return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset());
784    }
785
786    void emitCall(Node*);
787
788    int32_t framePointerOffsetToGetActivationRegisters()
789    {
790        return m_jit.codeBlock()->framePointerOffsetToGetActivationRegisters(
791            m_jit.graph().m_machineCaptureStart);
792    }
793
794    // Called once a node has completed code generation but prior to setting
795    // its result, to free up its children. (This must happen prior to setting
796    // the nodes result, since the node may have the same VirtualRegister as
797    // a child, and as such will use the same GeneratioInfo).
798    void useChildren(Node*);
799
800    // These method called to initialize the the GenerationInfo
801    // to describe the result of an operation.
802    void int32Result(GPRReg reg, Node* node, DataFormat format = DataFormatInt32, UseChildrenMode mode = CallUseChildren)
803    {
804        if (mode == CallUseChildren)
805            useChildren(node);
806
807        VirtualRegister virtualRegister = node->virtualRegister();
808        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
809
810        if (format == DataFormatInt32) {
811            m_jit.jitAssertIsInt32(reg);
812            m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
813            info.initInt32(node, node->refCount(), reg);
814        } else {
815#if USE(JSVALUE64)
816            RELEASE_ASSERT(format == DataFormatJSInt32);
817            m_jit.jitAssertIsJSInt32(reg);
818            m_gprs.retain(reg, virtualRegister, SpillOrderJS);
819            info.initJSValue(node, node->refCount(), reg, format);
820#elif USE(JSVALUE32_64)
821            RELEASE_ASSERT_NOT_REACHED();
822#endif
823        }
824    }
825    void int32Result(GPRReg reg, Node* node, UseChildrenMode mode)
826    {
827        int32Result(reg, node, DataFormatInt32, mode);
828    }
829    void int52Result(GPRReg reg, Node* node, DataFormat format, UseChildrenMode mode = CallUseChildren)
830    {
831        if (mode == CallUseChildren)
832            useChildren(node);
833
834        VirtualRegister virtualRegister = node->virtualRegister();
835        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
836
837        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
838        info.initInt52(node, node->refCount(), reg, format);
839    }
840    void int52Result(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
841    {
842        int52Result(reg, node, DataFormatInt52, mode);
843    }
844    void strictInt52Result(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
845    {
846        int52Result(reg, node, DataFormatStrictInt52, mode);
847    }
848    void noResult(Node* node, UseChildrenMode mode = CallUseChildren)
849    {
850        if (mode == UseChildrenCalledExplicitly)
851            return;
852        useChildren(node);
853    }
854    void cellResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
855    {
856        if (mode == CallUseChildren)
857            useChildren(node);
858
859        VirtualRegister virtualRegister = node->virtualRegister();
860        m_gprs.retain(reg, virtualRegister, SpillOrderCell);
861        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
862        info.initCell(node, node->refCount(), reg);
863    }
864    void blessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
865    {
866#if USE(JSVALUE64)
867        jsValueResult(reg, node, DataFormatJSBoolean, mode);
868#else
869        if (mode == CallUseChildren)
870            useChildren(node);
871
872        VirtualRegister virtualRegister = node->virtualRegister();
873        m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
874        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
875        info.initBoolean(node, node->refCount(), reg);
876#endif
877    }
878    void unblessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
879    {
880#if USE(JSVALUE64)
881        blessBoolean(reg);
882#endif
883        blessedBooleanResult(reg, node, mode);
884    }
885#if USE(JSVALUE64)
886    void jsValueResult(GPRReg reg, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
887    {
888        if (format == DataFormatJSInt32)
889            m_jit.jitAssertIsJSInt32(reg);
890
891        if (mode == CallUseChildren)
892            useChildren(node);
893
894        VirtualRegister virtualRegister = node->virtualRegister();
895        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
896        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
897        info.initJSValue(node, node->refCount(), reg, format);
898    }
899    void jsValueResult(GPRReg reg, Node* node, UseChildrenMode mode)
900    {
901        jsValueResult(reg, node, DataFormatJS, mode);
902    }
903#elif USE(JSVALUE32_64)
904    void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
905    {
906        if (mode == CallUseChildren)
907            useChildren(node);
908
909        VirtualRegister virtualRegister = node->virtualRegister();
910        m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
911        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
912        info.initBoolean(node, node->refCount(), reg);
913    }
914    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
915    {
916        if (mode == CallUseChildren)
917            useChildren(node);
918
919        VirtualRegister virtualRegister = node->virtualRegister();
920        m_gprs.retain(tag, virtualRegister, SpillOrderJS);
921        m_gprs.retain(payload, virtualRegister, SpillOrderJS);
922        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
923        info.initJSValue(node, node->refCount(), tag, payload, format);
924    }
925    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, UseChildrenMode mode)
926    {
927        jsValueResult(tag, payload, node, DataFormatJS, mode);
928    }
929#endif
930    void jsValueResult(JSValueRegs regs, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
931    {
932#if USE(JSVALUE64)
933        jsValueResult(regs.gpr(), node, format, mode);
934#else
935        jsValueResult(regs.tagGPR(), regs.payloadGPR(), node, format, mode);
936#endif
937    }
938    void storageResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
939    {
940        if (mode == CallUseChildren)
941            useChildren(node);
942
943        VirtualRegister virtualRegister = node->virtualRegister();
944        m_gprs.retain(reg, virtualRegister, SpillOrderStorage);
945        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
946        info.initStorage(node, node->refCount(), reg);
947    }
948    void doubleResult(FPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
949    {
950        if (mode == CallUseChildren)
951            useChildren(node);
952
953        VirtualRegister virtualRegister = node->virtualRegister();
954        m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
955        GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
956        info.initDouble(node, node->refCount(), reg);
957    }
958    void initConstantInfo(Node* node)
959    {
960        ASSERT(isInt32Constant(node) || isNumberConstant(node) || isJSConstant(node));
961        generationInfo(node).initConstant(node, node->refCount());
962    }
963
964    // These methods add calls to C++ helper functions.
965    // These methods are broadly value representation specific (i.e.
966    // deal with the fact that a JSValue may be passed in one or two
967    // machine registers, and delegate the calling convention specific
968    // decision as to how to fill the regsiters to setupArguments* methods.
969
970    JITCompiler::Call callOperation(P_JITOperation_E operation, GPRReg result)
971    {
972        m_jit.setupArgumentsExecState();
973        return appendCallWithExceptionCheckSetResult(operation, result);
974    }
975    JITCompiler::Call callOperation(P_JITOperation_EC operation, GPRReg result, GPRReg cell)
976    {
977        m_jit.setupArgumentsWithExecState(cell);
978        return appendCallWithExceptionCheckSetResult(operation, result);
979    }
980    JITCompiler::Call callOperation(P_JITOperation_EO operation, GPRReg result, GPRReg object)
981    {
982        m_jit.setupArgumentsWithExecState(object);
983        return appendCallWithExceptionCheckSetResult(operation, result);
984    }
985    JITCompiler::Call callOperation(P_JITOperation_EOS operation, GPRReg result, GPRReg object, size_t size)
986    {
987        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
988        return appendCallWithExceptionCheckSetResult(operation, result);
989    }
990    JITCompiler::Call callOperation(P_JITOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
991    {
992        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
993        return appendCallWithExceptionCheckSetResult(operation, result);
994    }
995    JITCompiler::Call callOperation(C_JITOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
996    {
997        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(static_cast<size_t>(size)));
998        return appendCallWithExceptionCheckSetResult(operation, result);
999    }
1000    JITCompiler::Call callOperation(P_JITOperation_EPS operation, GPRReg result, GPRReg old, size_t size)
1001    {
1002        m_jit.setupArgumentsWithExecState(old, TrustedImmPtr(size));
1003        return appendCallWithExceptionCheckSetResult(operation, result);
1004    }
1005    JITCompiler::Call callOperation(P_JITOperation_ES operation, GPRReg result, size_t size)
1006    {
1007        m_jit.setupArgumentsWithExecState(TrustedImmPtr(size));
1008        return appendCallWithExceptionCheckSetResult(operation, result);
1009    }
1010    JITCompiler::Call callOperation(P_JITOperation_ESJss operation, GPRReg result, size_t index, GPRReg arg1)
1011    {
1012        m_jit.setupArgumentsWithExecState(TrustedImmPtr(index), arg1);
1013        return appendCallWithExceptionCheckSetResult(operation, result);
1014    }
1015    JITCompiler::Call callOperation(P_JITOperation_ESt operation, GPRReg result, Structure* structure)
1016    {
1017        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
1018        return appendCallWithExceptionCheckSetResult(operation, result);
1019    }
1020    JITCompiler::Call callOperation(P_JITOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg arg2)
1021    {
1022        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2);
1023        return appendCallWithExceptionCheckSetResult(operation, result);
1024    }
1025    JITCompiler::Call callOperation(P_JITOperation_EStZ operation, GPRReg result, Structure* structure, size_t arg2)
1026    {
1027        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(arg2));
1028        return appendCallWithExceptionCheckSetResult(operation, result);
1029    }
1030    JITCompiler::Call callOperation(P_JITOperation_EStZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1031    {
1032        m_jit.setupArgumentsWithExecState(arg1, arg2);
1033        return appendCallWithExceptionCheckSetResult(operation, result);
1034    }
1035    JITCompiler::Call callOperation(P_JITOperation_EStPS operation, GPRReg result, Structure* structure, void* pointer, size_t size)
1036    {
1037        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(pointer), TrustedImmPtr(size));
1038        return appendCallWithExceptionCheckSetResult(operation, result);
1039    }
1040    JITCompiler::Call callOperation(P_JITOperation_EStSS operation, GPRReg result, Structure* structure, size_t index, size_t size)
1041    {
1042        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(index), TrustedImmPtr(size));
1043        return appendCallWithExceptionCheckSetResult(operation, result);
1044    }
1045    JITCompiler::Call callOperation(C_JITOperation_E operation, GPRReg result)
1046    {
1047        m_jit.setupArgumentsExecState();
1048        return appendCallWithExceptionCheckSetResult(operation, result);
1049    }
1050    JITCompiler::Call callOperation(C_JITOperation_EC operation, GPRReg result, GPRReg arg1)
1051    {
1052        m_jit.setupArgumentsWithExecState(arg1);
1053        return appendCallWithExceptionCheckSetResult(operation, result);
1054    }
1055    JITCompiler::Call callOperation(C_JITOperation_EC operation, GPRReg result, JSCell* cell)
1056    {
1057        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
1058        return appendCallWithExceptionCheckSetResult(operation, result);
1059    }
1060    JITCompiler::Call callOperation(C_JITOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
1061    {
1062        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
1063        return appendCallWithExceptionCheckSetResult(operation, result);
1064    }
1065    JITCompiler::Call callOperation(C_JITOperation_EIcf operation, GPRReg result, InlineCallFrame* inlineCallFrame)
1066    {
1067        m_jit.setupArgumentsWithExecState(TrustedImmPtr(inlineCallFrame));
1068        return appendCallWithExceptionCheckSetResult(operation, result);
1069    }
1070    JITCompiler::Call callOperation(C_JITOperation_ESt operation, GPRReg result, Structure* structure)
1071    {
1072        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
1073        return appendCallWithExceptionCheckSetResult(operation, result);
1074    }
1075    JITCompiler::Call callOperation(C_JITOperation_EJssSt operation, GPRReg result, GPRReg arg1, Structure* structure)
1076    {
1077        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(structure));
1078        return appendCallWithExceptionCheckSetResult(operation, result);
1079    }
1080    JITCompiler::Call callOperation(C_JITOperation_EJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1081    {
1082        m_jit.setupArgumentsWithExecState(arg1, arg2);
1083        return appendCallWithExceptionCheckSetResult(operation, result);
1084    }
1085    JITCompiler::Call callOperation(C_JITOperation_EJssJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
1086    {
1087        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
1088        return appendCallWithExceptionCheckSetResult(operation, result);
1089    }
1090
1091    JITCompiler::Call callOperation(S_JITOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1092    {
1093        m_jit.setupArgumentsWithExecState(arg1, arg2);
1094        return appendCallWithExceptionCheckSetResult(operation, result);
1095    }
1096
1097    JITCompiler::Call callOperation(Jss_JITOperation_EZ operation, GPRReg result, GPRReg arg1)
1098    {
1099        m_jit.setupArgumentsWithExecState(arg1);
1100        return appendCallWithExceptionCheckSetResult(operation, result);
1101    }
1102
1103    JITCompiler::Call callOperation(V_JITOperation_EC operation, GPRReg arg1)
1104    {
1105        m_jit.setupArgumentsWithExecState(arg1);
1106        return appendCallWithExceptionCheck(operation);
1107    }
1108
1109    JITCompiler::Call callOperation(V_JITOperation_EC operation, JSCell* arg1)
1110    {
1111        m_jit.setupArgumentsWithExecState(TrustedImmPtr(arg1));
1112        return appendCallWithExceptionCheck(operation);
1113    }
1114
1115    JITCompiler::Call callOperation(V_JITOperation_ECIcf operation, GPRReg arg1, InlineCallFrame* inlineCallFrame)
1116    {
1117        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(inlineCallFrame));
1118        return appendCallWithExceptionCheck(operation);
1119    }
1120    JITCompiler::Call callOperation(V_JITOperation_ECCIcf operation, GPRReg arg1, GPRReg arg2, InlineCallFrame* inlineCallFrame)
1121    {
1122        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(inlineCallFrame));
1123        return appendCallWithExceptionCheck(operation);
1124    }
1125
1126    JITCompiler::Call callOperation(V_JITOperation_ECZ operation, GPRReg arg1, int arg2)
1127    {
1128        m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
1129        return appendCallWithExceptionCheck(operation);
1130    }
1131    JITCompiler::Call callOperation(V_JITOperation_ECC operation, GPRReg arg1, GPRReg arg2)
1132    {
1133        m_jit.setupArgumentsWithExecState(arg1, arg2);
1134        return appendCallWithExceptionCheck(operation);
1135    }
1136    JITCompiler::Call callOperation(V_JITOperation_ECC operation, GPRReg arg1, JSCell* arg2)
1137    {
1138        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
1139        return appendCallWithExceptionCheck(operation);
1140    }
1141    JITCompiler::Call callOperation(V_JITOperation_ECC operation, JSCell* arg1, GPRReg arg2)
1142    {
1143        m_jit.setupArgumentsWithExecState(TrustedImmPtr(arg1), arg2);
1144        return appendCallWithExceptionCheck(operation);
1145    }
1146
1147    JITCompiler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb operation, void* pointer)
1148    {
1149        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
1150        return appendCallWithCallFrameRollbackOnException(operation);
1151    }
1152
1153    JITCompiler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E operation, GPRReg result)
1154    {
1155        m_jit.setupArgumentsExecState();
1156        return appendCallWithCallFrameRollbackOnExceptionSetResult(operation, result);
1157    }
1158
1159    template<typename FunctionType, typename ArgumentType1>
1160    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1)
1161    {
1162        return callOperation(operation, arg1);
1163    }
1164    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
1165    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2)
1166    {
1167        return callOperation(operation, arg1, arg2);
1168    }
1169    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3>
1170    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3)
1171    {
1172        return callOperation(operation, arg1, arg2, arg3);
1173    }
1174    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4>
1175    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4)
1176    {
1177        return callOperation(operation, arg1, arg2, arg3, arg4);
1178    }
1179    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
1180    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
1181    {
1182        return callOperation(operation, arg1, arg2, arg3, arg4, arg5);
1183    }
1184
1185    JITCompiler::Call callOperation(D_JITOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
1186    {
1187        m_jit.setupArguments(arg1, arg2);
1188        return appendCallSetResult(operation, result);
1189    }
1190    JITCompiler::Call callOperation(D_JITOperation_D operation, FPRReg result, FPRReg arg1)
1191    {
1192        m_jit.setupArguments(arg1);
1193        return appendCallSetResult(operation, result);
1194    }
1195    JITCompiler::Call callOperation(D_JITOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
1196    {
1197        m_jit.setupArguments(arg1, arg2);
1198        return appendCallSetResult(operation, result);
1199    }
1200    JITCompiler::Call callOperation(I_JITOperation_EJss operation, GPRReg result, GPRReg arg1)
1201    {
1202        m_jit.setupArgumentsWithExecState(arg1);
1203        return appendCallWithExceptionCheckSetResult(operation, result);
1204    }
1205    JITCompiler::Call callOperation(C_JITOperation_EZ operation, GPRReg result, GPRReg arg1)
1206    {
1207        m_jit.setupArgumentsWithExecState(arg1);
1208        return appendCallWithExceptionCheckSetResult(operation, result);
1209    }
1210    JITCompiler::Call callOperation(C_JITOperation_EZ operation, GPRReg result, int32_t arg1)
1211    {
1212        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
1213        return appendCallWithExceptionCheckSetResult(operation, result);
1214    }
1215
1216#if USE(JSVALUE64)
1217    JITCompiler::Call callOperation(J_JITOperation_E operation, GPRReg result)
1218    {
1219        m_jit.setupArgumentsExecState();
1220        return appendCallWithExceptionCheckSetResult(operation, result);
1221    }
1222    JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg result, void* pointer)
1223    {
1224        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
1225        return appendCallWithExceptionCheckSetResult(operation, result);
1226    }
1227    JITCompiler::Call callOperation(Z_JITOperation_D operation, GPRReg result, FPRReg arg1)
1228    {
1229        m_jit.setupArguments(arg1);
1230        JITCompiler::Call call = m_jit.appendCall(operation);
1231        m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
1232        return call;
1233    }
1234    JITCompiler::Call callOperation(Q_JITOperation_J operation, GPRReg result, GPRReg value)
1235    {
1236        m_jit.setupArguments(value);
1237        return appendCallSetResult(operation, result);
1238    }
1239    JITCompiler::Call callOperation(Q_JITOperation_D operation, GPRReg result, FPRReg value)
1240    {
1241        m_jit.setupArguments(value);
1242        return appendCallSetResult(operation, result);
1243    }
1244    JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg result, StringImpl* uid)
1245    {
1246        m_jit.setupArgumentsWithExecState(TrustedImmPtr(uid));
1247        return appendCallWithExceptionCheckSetResult(operation, result);
1248    }
1249    JITCompiler::Call callOperation(J_JITOperation_EA operation, GPRReg result, GPRReg arg1)
1250    {
1251        m_jit.setupArgumentsWithExecState(arg1);
1252        return appendCallWithExceptionCheckSetResult(operation, result);
1253    }
1254    JITCompiler::Call callOperation(J_JITOperation_EAZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1255    {
1256        m_jit.setupArgumentsWithExecState(arg1, arg2);
1257        return appendCallWithExceptionCheckSetResult(operation, result);
1258    }
1259    JITCompiler::Call callOperation(J_JITOperation_EJssZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1260    {
1261        m_jit.setupArgumentsWithExecState(arg1, arg2);
1262        return appendCallWithExceptionCheckSetResult(operation, result);
1263    }
1264    JITCompiler::Call callOperation(J_JITOperation_EPS operation, GPRReg result, void* pointer, size_t size)
1265    {
1266        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
1267        return appendCallWithExceptionCheckSetResult(operation, result);
1268    }
1269    JITCompiler::Call callOperation(J_JITOperation_ESS operation, GPRReg result, int startConstant, int numConstants)
1270    {
1271        m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
1272        return appendCallWithExceptionCheckSetResult(operation, result);
1273    }
1274    JITCompiler::Call callOperation(J_JITOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer)
1275    {
1276        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
1277        return appendCallWithExceptionCheckSetResult(operation, result);
1278    }
1279    JITCompiler::Call callOperation(J_JITOperation_EC operation, GPRReg result, JSCell* cell)
1280    {
1281        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
1282        return appendCallWithExceptionCheckSetResult(operation, result);
1283    }
1284    JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, const StringImpl* uid)
1285    {
1286        m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
1287        return appendCallWithExceptionCheckSetResult(operation, result);
1288    }
1289    JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, StringImpl* uid)
1290    {
1291        m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
1292        return appendCallWithExceptionCheckSetResult(operation, result);
1293    }
1294    JITCompiler::Call callOperation(J_JITOperation_EDA operation, GPRReg result, FPRReg arg1, GPRReg arg2)
1295    {
1296        m_jit.setupArgumentsWithExecState(arg1, arg2);
1297        return appendCallWithExceptionCheckSetResult(operation, result);
1298    }
1299    JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1300    {
1301        m_jit.setupArgumentsWithExecState(arg1, arg2);
1302        return appendCallWithExceptionCheckSetResult(operation, result);
1303    }
1304    JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg result, GPRReg arg1)
1305    {
1306        m_jit.setupArgumentsWithExecState(arg1);
1307        return appendCallWithExceptionCheckSetResult(operation, result);
1308    }
1309    JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg result, GPRReg arg1)
1310    {
1311        m_jit.setupArgumentsWithExecState(arg1);
1312        return appendCallWithExceptionCheckSetResult(operation, result);
1313    }
1314    JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg result, int32_t arg1)
1315    {
1316        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
1317        return appendCallWithExceptionCheckSetResult(operation, result);
1318    }
1319    JITCompiler::Call callOperation(J_JITOperation_EZZ operation, GPRReg result, int32_t arg1, GPRReg arg2)
1320    {
1321        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
1322        return appendCallWithExceptionCheckSetResult(operation, result);
1323    }
1324    JITCompiler::Call callOperation(J_JITOperation_EZIcfZ operation, GPRReg result, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2)
1325    {
1326        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2);
1327        return appendCallWithExceptionCheckSetResult(operation, result);
1328    }
1329
1330    JITCompiler::Call callOperation(P_JITOperation_EJS operation, GPRReg result, GPRReg value, size_t index)
1331    {
1332        m_jit.setupArgumentsWithExecState(value, TrustedImmPtr(index));
1333        return appendCallSetResult(operation, result);
1334    }
1335
1336    JITCompiler::Call callOperation(P_JITOperation_EStJ operation, GPRReg result, Structure* structure, GPRReg arg2)
1337    {
1338        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2);
1339        return appendCallWithExceptionCheckSetResult(operation, result);
1340    }
1341
1342    JITCompiler::Call callOperation(C_JITOperation_EJ operation, GPRReg result, GPRReg arg1)
1343    {
1344        m_jit.setupArgumentsWithExecState(arg1);
1345        return appendCallWithExceptionCheckSetResult(operation, result);
1346    }
1347    JITCompiler::Call callOperation(S_JITOperation_J operation, GPRReg result, GPRReg arg1)
1348    {
1349        m_jit.setupArguments(arg1);
1350        return appendCallSetResult(operation, result);
1351    }
1352    JITCompiler::Call callOperation(S_JITOperation_EJ operation, GPRReg result, GPRReg arg1)
1353    {
1354        m_jit.setupArgumentsWithExecState(arg1);
1355        return appendCallWithExceptionCheckSetResult(operation, result);
1356    }
1357    JITCompiler::Call callOperation(J_JITOperation_EJ operation, GPRReg result, GPRReg arg1)
1358    {
1359        m_jit.setupArgumentsWithExecState(arg1);
1360        return appendCallWithExceptionCheckSetResult(operation, result);
1361    }
1362    JITCompiler::Call callOperation(S_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1363    {
1364        m_jit.setupArgumentsWithExecState(arg1, arg2);
1365        return appendCallWithExceptionCheckSetResult(operation, result);
1366    }
1367
1368    JITCompiler::Call callOperation(J_JITOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1369    {
1370        m_jit.setupArgumentsWithExecState(arg1, arg2);
1371        return appendCallWithExceptionCheckSetResult(operation, result);
1372    }
1373    JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1374    {
1375        m_jit.setupArgumentsWithExecState(arg1, arg2);
1376        return appendCallWithExceptionCheckSetResult(operation, result);
1377    }
1378    JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm)
1379    {
1380        m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))));
1381        return appendCallWithExceptionCheckSetResult(operation, result);
1382    }
1383    JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2)
1384    {
1385        m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))), arg2);
1386        return appendCallWithExceptionCheckSetResult(operation, result);
1387    }
1388    JITCompiler::Call callOperation(J_JITOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1389    {
1390        m_jit.setupArgumentsWithExecState(arg1, arg2);
1391        return appendCallWithExceptionCheckSetResult(operation, result);
1392    }
1393    JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
1394    {
1395        m_jit.setupArgumentsWithExecState(arg1, arg2);
1396        return appendCallWithExceptionCheckSetResult(operation, result);
1397    }
1398    JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg result, GPRReg arg1, JSValueRegs arg2)
1399    {
1400        m_jit.setupArgumentsWithExecState(arg1, arg2.gpr());
1401        return appendCallWithExceptionCheckSetResult(operation, result);
1402    }
1403
1404    JITCompiler::Call callOperation(V_JITOperation_EOZD operation, GPRReg arg1, GPRReg arg2, FPRReg arg3)
1405    {
1406        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
1407        return appendCallWithExceptionCheck(operation);
1408    }
1409    JITCompiler::Call callOperation(V_JITOperation_EJ operation, GPRReg arg1)
1410    {
1411        m_jit.setupArgumentsWithExecState(arg1);
1412        return appendCallWithExceptionCheck(operation);
1413    }
1414    JITCompiler::Call callOperation(V_JITOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer)
1415    {
1416        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
1417        return appendCallWithExceptionCheck(operation);
1418    }
1419    JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1, GPRReg arg2, StringImpl* uid)
1420    {
1421        m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, arg2, TrustedImmPtr(uid));
1422        return appendCallWithExceptionCheck(operation);
1423    }
1424    JITCompiler::Call callOperation(V_JITOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
1425    {
1426        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
1427        return appendCallWithExceptionCheck(operation);
1428    }
1429    JITCompiler::Call callOperation(V_JITOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
1430    {
1431        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
1432        return appendCallWithExceptionCheck(operation);
1433    }
1434
1435    JITCompiler::Call callOperation(V_JITOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
1436    {
1437        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
1438        return appendCallWithExceptionCheck(operation);
1439    }
1440    JITCompiler::Call callOperation(V_JITOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
1441    {
1442        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
1443        return appendCallWithExceptionCheck(operation);
1444    }
1445
1446    JITCompiler::Call callOperation(V_JITOperation_EVwsJ operation, VariableWatchpointSet* watchpointSet, GPRReg arg)
1447    {
1448        m_jit.setupArgumentsWithExecState(TrustedImmPtr(watchpointSet), arg);
1449        return appendCall(operation);
1450    }
1451
1452    JITCompiler::Call callOperation(D_JITOperation_EJ operation, FPRReg result, GPRReg arg1)
1453    {
1454        m_jit.setupArgumentsWithExecState(arg1);
1455        return appendCallWithExceptionCheckSetResult(operation, result);
1456    }
1457
1458#else // USE(JSVALUE32_64)
1459
1460// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]).
1461// To avoid assemblies from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
1462#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
1463#define EABI_32BIT_DUMMY_ARG      TrustedImm32(0),
1464#else
1465#define EABI_32BIT_DUMMY_ARG
1466#endif
1467
1468// JSVALUE32_64 is a 64-bit integer that cannot be put half in an argument register and half on stack when using SH4 architecture.
1469// To avoid this, let's occupy the 4th argument register (r7) with a dummy argument when necessary. This must only be done when there
1470// is no other 32-bit value argument behind this 64-bit JSValue.
1471#if CPU(SH4)
1472#define SH4_32BIT_DUMMY_ARG      TrustedImm32(0),
1473#else
1474#define SH4_32BIT_DUMMY_ARG
1475#endif
1476
1477    JITCompiler::Call callOperation(Z_JITOperation_D operation, GPRReg result, FPRReg arg1)
1478    {
1479        prepareForExternalCall();
1480        m_jit.setupArguments(arg1);
1481        JITCompiler::Call call = m_jit.appendCall(operation);
1482        m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
1483        return call;
1484    }
1485    JITCompiler::Call callOperation(J_JITOperation_E operation, GPRReg resultTag, GPRReg resultPayload)
1486    {
1487        m_jit.setupArgumentsExecState();
1488        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1489    }
1490    JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
1491    {
1492        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
1493        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1494    }
1495    JITCompiler::Call callOperation(J_JITOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer)
1496    {
1497        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
1498        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1499    }
1500    JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
1501    {
1502        m_jit.setupArgumentsWithExecState(arg1);
1503        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1504    }
1505    JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, StringImpl* uid)
1506    {
1507        m_jit.setupArgumentsWithExecState(TrustedImmPtr(uid));
1508        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1509    }
1510    JITCompiler::Call callOperation(J_JITOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
1511    {
1512        m_jit.setupArgumentsWithExecState(arg1);
1513        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1514    }
1515    JITCompiler::Call callOperation(J_JITOperation_EAZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
1516    {
1517        m_jit.setupArgumentsWithExecState(arg1, arg2);
1518        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1519    }
1520    JITCompiler::Call callOperation(J_JITOperation_EJssZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
1521    {
1522        m_jit.setupArgumentsWithExecState(arg1, arg2);
1523        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1524    }
1525    JITCompiler::Call callOperation(J_JITOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size)
1526    {
1527        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
1528        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1529    }
1530    JITCompiler::Call callOperation(J_JITOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants)
1531    {
1532        m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
1533        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1534    }
1535    JITCompiler::Call callOperation(J_JITOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer)
1536    {
1537        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(pointer));
1538        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1539    }
1540    JITCompiler::Call callOperation(J_JITOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
1541    {
1542        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
1543        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1544    }
1545
1546    JITCompiler::Call callOperation(J_JITOperation_EC operation, GPRReg resultTag, GPRReg resultPayload, JSCell* cell)
1547    {
1548        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
1549        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1550    }
1551    JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1, const StringImpl* uid)
1552    {
1553        m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
1554        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1555    }
1556    JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, StringImpl* uid)
1557    {
1558        m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
1559        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1560    }
1561    JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, int32_t arg1Tag, GPRReg arg1Payload, StringImpl* uid)
1562    {
1563        m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(uid));
1564        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1565    }
1566    JITCompiler::Call callOperation(J_JITOperation_EDA operation, GPRReg resultTag, GPRReg resultPayload, FPRReg arg1, GPRReg arg2)
1567    {
1568        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1, arg2);
1569        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1570    }
1571    JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
1572    {
1573        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
1574        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1575    }
1576    JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, TrustedImm32 arg1Tag, GPRReg arg1Payload, GPRReg arg2)
1577    {
1578        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
1579        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1580    }
1581    JITCompiler::Call callOperation(J_JITOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload)
1582    {
1583        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
1584        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1585    }
1586    JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
1587    {
1588        m_jit.setupArgumentsWithExecState(arg1);
1589        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1590    }
1591    JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1)
1592    {
1593        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
1594        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1595    }
1596    JITCompiler::Call callOperation(J_JITOperation_EZIcfZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2)
1597    {
1598        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2);
1599        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1600    }
1601    JITCompiler::Call callOperation(J_JITOperation_EZZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, GPRReg arg2)
1602    {
1603        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
1604        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1605    }
1606
1607    JITCompiler::Call callOperation(P_JITOperation_EJS operation, GPRReg result, JSValueRegs value, size_t index)
1608    {
1609        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG value.payloadGPR(), value.tagGPR(), TrustedImmPtr(index));
1610        return appendCallSetResult(operation, result);
1611    }
1612
1613    JITCompiler::Call callOperation(P_JITOperation_EStJ operation, GPRReg result, Structure* structure, GPRReg arg2Tag, GPRReg arg2Payload)
1614    {
1615        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2Payload, arg2Tag);
1616        return appendCallWithExceptionCheckSetResult(operation, result);
1617    }
1618
1619    JITCompiler::Call callOperation(C_JITOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
1620    {
1621        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
1622        return appendCallWithExceptionCheckSetResult(operation, result);
1623    }
1624    JITCompiler::Call callOperation(S_JITOperation_J operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
1625    {
1626        m_jit.setupArguments(arg1Payload, arg1Tag);
1627        return appendCallSetResult(operation, result);
1628    }
1629    JITCompiler::Call callOperation(S_JITOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
1630    {
1631        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
1632        return appendCallWithExceptionCheckSetResult(operation, result);
1633    }
1634
1635    JITCompiler::Call callOperation(S_JITOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
1636    {
1637        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
1638        return appendCallWithExceptionCheckSetResult(operation, result);
1639    }
1640    JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
1641    {
1642        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
1643        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1644    }
1645    JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::TrustedImm32 imm)
1646    {
1647        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag));
1648        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1649    }
1650    JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::TrustedImm32 imm, GPRReg arg2Tag, GPRReg arg2Payload)
1651    {
1652        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag), SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
1653        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1654    }
1655
1656    JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload)
1657    {
1658        m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
1659        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1660    }
1661    JITCompiler::Call callOperation(J_JITOperation_ECJ operation, JSValueRegs result, GPRReg arg1, JSValueRegs arg2)
1662    {
1663        m_jit.setupArgumentsWithExecState(arg1, arg2.payloadGPR(), arg2.tagGPR());
1664        return appendCallWithExceptionCheckSetResult(operation, result.payloadGPR(), result.tagGPR());
1665    }
1666    JITCompiler::Call callOperation(J_JITOperation_ECC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
1667    {
1668        m_jit.setupArgumentsWithExecState(arg1, arg2);
1669        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
1670    }
1671
1672    JITCompiler::Call callOperation(V_JITOperation_EOZD operation, GPRReg arg1, GPRReg arg2, FPRReg arg3)
1673    {
1674        m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG arg3);
1675        return appendCallWithExceptionCheck(operation);
1676    }
1677
1678    JITCompiler::Call callOperation(V_JITOperation_EJ operation, GPRReg arg1Tag, GPRReg arg1Payload)
1679    {
1680        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
1681        return appendCallWithExceptionCheck(operation);
1682    }
1683
1684    JITCompiler::Call callOperation(V_JITOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer)
1685    {
1686        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
1687        return appendCallWithExceptionCheck(operation);
1688    }
1689    JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Payload, StringImpl* uid)
1690    {
1691        m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, arg2Payload, TrustedImm32(JSValue::CellTag), TrustedImmPtr(uid));
1692        return appendCallWithExceptionCheck(operation);
1693    }
1694    JITCompiler::Call callOperation(V_JITOperation_ECJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
1695    {
1696        m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
1697        return appendCallWithExceptionCheck(operation);
1698    }
1699
1700    JITCompiler::Call callOperation(V_JITOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
1701    {
1702        m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG SH4_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
1703        return appendCallWithExceptionCheck(operation);
1704    }
1705
1706    JITCompiler::Call callOperation(V_JITOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
1707    {
1708        m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG SH4_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
1709        return appendCallWithExceptionCheck(operation);
1710    }
1711    JITCompiler::Call callOperation(V_JITOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, TrustedImm32 arg3Tag, GPRReg arg3Payload)
1712    {
1713        m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG SH4_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
1714        return appendCallWithExceptionCheck(operation);
1715    }
1716
1717    JITCompiler::Call callOperation(V_JITOperation_EVwsJ operation, VariableWatchpointSet* watchpointSet, GPRReg argTag, GPRReg argPayload)
1718    {
1719        m_jit.setupArgumentsWithExecState(TrustedImmPtr(watchpointSet), argPayload, argTag);
1720        return appendCall(operation);
1721    }
1722
1723    JITCompiler::Call callOperation(D_JITOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
1724    {
1725        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
1726        return appendCallWithExceptionCheckSetResult(operation, result);
1727    }
1728
1729#undef EABI_32BIT_DUMMY_ARG
1730#undef SH4_32BIT_DUMMY_ARG
1731
1732    template<typename FunctionType>
1733    JITCompiler::Call callOperation(
1734        FunctionType operation, JSValueRegs result)
1735    {
1736        return callOperation(operation, result.tagGPR(), result.payloadGPR());
1737    }
1738    template<typename FunctionType, typename ArgumentType1>
1739    JITCompiler::Call callOperation(
1740        FunctionType operation, JSValueRegs result, ArgumentType1 arg1)
1741    {
1742        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1);
1743    }
1744    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
1745    JITCompiler::Call callOperation(
1746        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2)
1747    {
1748        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2);
1749    }
1750    template<
1751        typename FunctionType, typename ArgumentType1, typename ArgumentType2,
1752        typename ArgumentType3>
1753    JITCompiler::Call callOperation(
1754        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
1755        ArgumentType3 arg3)
1756    {
1757        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3);
1758    }
1759    template<
1760        typename FunctionType, typename ArgumentType1, typename ArgumentType2,
1761        typename ArgumentType3, typename ArgumentType4>
1762    JITCompiler::Call callOperation(
1763        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
1764        ArgumentType3 arg3, ArgumentType4 arg4)
1765    {
1766        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4);
1767    }
1768    template<
1769        typename FunctionType, typename ArgumentType1, typename ArgumentType2,
1770        typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
1771    JITCompiler::Call callOperation(
1772        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
1773        ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
1774    {
1775        return callOperation(
1776            operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4, arg5);
1777    }
1778#endif // USE(JSVALUE32_64)
1779
1780#if !defined(NDEBUG) && !CPU(ARM) && !CPU(MIPS) && !CPU(SH4)
1781    void prepareForExternalCall()
1782    {
1783        // We're about to call out to a "native" helper function. The helper
1784        // function is expected to set topCallFrame itself with the ExecState
1785        // that is passed to it.
1786        //
1787        // We explicitly trash topCallFrame here so that we'll know if some of
1788        // the helper functions are not setting topCallFrame when they should
1789        // be doing so. Note: the previous value in topcallFrame was not valid
1790        // anyway since it was not being updated by JIT'ed code by design.
1791
1792        for (unsigned i = 0; i < sizeof(void*) / 4; i++)
1793            m_jit.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&m_jit.vm()->topCallFrame) + i * 4);
1794    }
1795#else
1796    void prepareForExternalCall() { }
1797#endif
1798
1799    // These methods add call instructions, with optional exception checks & setting results.
1800    JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
1801    {
1802        prepareForExternalCall();
1803        m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
1804        JITCompiler::Call call = m_jit.appendCall(function);
1805        m_jit.exceptionCheck();
1806        return call;
1807    }
1808    JITCompiler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr& function)
1809    {
1810        prepareForExternalCall();
1811        m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
1812        JITCompiler::Call call = m_jit.appendCall(function);
1813        m_jit.exceptionCheckWithCallFrameRollback();
1814        return call;
1815    }
1816    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result)
1817    {
1818        JITCompiler::Call call = appendCallWithExceptionCheck(function);
1819        if ((result != InvalidGPRReg) && (result != GPRInfo::returnValueGPR))
1820            m_jit.move(GPRInfo::returnValueGPR, result);
1821        return call;
1822    }
1823    JITCompiler::Call appendCallWithCallFrameRollbackOnExceptionSetResult(const FunctionPtr& function, GPRReg result)
1824    {
1825        JITCompiler::Call call = appendCallWithCallFrameRollbackOnException(function);
1826        if ((result != InvalidGPRReg) && (result != GPRInfo::returnValueGPR))
1827            m_jit.move(GPRInfo::returnValueGPR, result);
1828        return call;
1829    }
1830    JITCompiler::Call appendCallSetResult(const FunctionPtr& function, GPRReg result)
1831    {
1832        prepareForExternalCall();
1833        m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
1834        JITCompiler::Call call = m_jit.appendCall(function);
1835        if (result != InvalidGPRReg)
1836            m_jit.move(GPRInfo::returnValueGPR, result);
1837        return call;
1838    }
1839    JITCompiler::Call appendCall(const FunctionPtr& function)
1840    {
1841        prepareForExternalCall();
1842        m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
1843        return m_jit.appendCall(function);
1844    }
1845    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2)
1846    {
1847        JITCompiler::Call call = appendCallWithExceptionCheck(function);
1848        m_jit.setupResults(result1, result2);
1849        return call;
1850    }
1851#if CPU(X86)
1852    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
1853    {
1854        JITCompiler::Call call = appendCallWithExceptionCheck(function);
1855        if (result != InvalidFPRReg) {
1856            m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
1857            m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
1858        }
1859        return call;
1860    }
1861    JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
1862    {
1863        JITCompiler::Call call = appendCall(function);
1864        if (result != InvalidFPRReg) {
1865            m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
1866            m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
1867        }
1868        return call;
1869    }
1870#elif CPU(ARM) && !CPU(ARM_HARDFP)
1871    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
1872    {
1873        JITCompiler::Call call = appendCallWithExceptionCheck(function);
1874        if (result != InvalidFPRReg)
1875            m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
1876        return call;
1877    }
1878    JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
1879    {
1880        JITCompiler::Call call = appendCall(function);
1881        if (result != InvalidFPRReg)
1882            m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
1883        return call;
1884    }
1885#else // CPU(X86_64) || (CPU(ARM) && CPU(ARM_HARDFP)) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
1886    JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
1887    {
1888        JITCompiler::Call call = appendCallWithExceptionCheck(function);
1889        if (result != InvalidFPRReg)
1890            m_jit.moveDouble(FPRInfo::returnValueFPR, result);
1891        return call;
1892    }
1893    JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
1894    {
1895        JITCompiler::Call call = appendCall(function);
1896        if (result != InvalidFPRReg)
1897            m_jit.moveDouble(FPRInfo::returnValueFPR, result);
1898        return call;
1899    }
1900#endif
1901
1902    void branchDouble(JITCompiler::DoubleCondition cond, FPRReg left, FPRReg right, BasicBlock* destination)
1903    {
1904        return addBranch(m_jit.branchDouble(cond, left, right), destination);
1905    }
1906
1907    void branchDoubleNonZero(FPRReg value, FPRReg scratch, BasicBlock* destination)
1908    {
1909        return addBranch(m_jit.branchDoubleNonZero(value, scratch), destination);
1910    }
1911
1912    template<typename T, typename U>
1913    void branch32(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
1914    {
1915        return addBranch(m_jit.branch32(cond, left, right), destination);
1916    }
1917
1918    template<typename T, typename U>
1919    void branchTest32(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination)
1920    {
1921        return addBranch(m_jit.branchTest32(cond, value, mask), destination);
1922    }
1923
1924    template<typename T>
1925    void branchTest32(JITCompiler::ResultCondition cond, T value, BasicBlock* destination)
1926    {
1927        return addBranch(m_jit.branchTest32(cond, value), destination);
1928    }
1929
1930#if USE(JSVALUE64)
1931    template<typename T, typename U>
1932    void branch64(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
1933    {
1934        return addBranch(m_jit.branch64(cond, left, right), destination);
1935    }
1936#endif
1937
1938    template<typename T, typename U>
1939    void branch8(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
1940    {
1941        return addBranch(m_jit.branch8(cond, left, right), destination);
1942    }
1943
1944    template<typename T, typename U>
1945    void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
1946    {
1947        return addBranch(m_jit.branchPtr(cond, left, right), destination);
1948    }
1949
1950    template<typename T, typename U>
1951    void branchTestPtr(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination)
1952    {
1953        return addBranch(m_jit.branchTestPtr(cond, value, mask), destination);
1954    }
1955
1956    template<typename T>
1957    void branchTestPtr(JITCompiler::ResultCondition cond, T value, BasicBlock* destination)
1958    {
1959        return addBranch(m_jit.branchTestPtr(cond, value), destination);
1960    }
1961
1962    template<typename T, typename U>
1963    void branchTest8(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination)
1964    {
1965        return addBranch(m_jit.branchTest8(cond, value, mask), destination);
1966    }
1967
1968    template<typename T>
1969    void branchTest8(JITCompiler::ResultCondition cond, T value, BasicBlock* destination)
1970    {
1971        return addBranch(m_jit.branchTest8(cond, value), destination);
1972    }
1973
1974    enum FallThroughMode {
1975        AtFallThroughPoint,
1976        ForceJump
1977    };
1978    void jump(BasicBlock* destination, FallThroughMode fallThroughMode = AtFallThroughPoint)
1979    {
1980        if (destination == nextBlock()
1981            && fallThroughMode == AtFallThroughPoint)
1982            return;
1983        addBranch(m_jit.jump(), destination);
1984    }
1985
1986    void addBranch(const MacroAssembler::Jump& jump, BasicBlock* destination)
1987    {
1988        m_branches.append(BranchRecord(jump, destination));
1989    }
1990    void addBranch(const MacroAssembler::JumpList& jump, BasicBlock* destination);
1991
1992    void linkBranches();
1993
1994    void dump(const char* label = 0);
1995
1996    bool isInteger(Node* node)
1997    {
1998        if (node->hasInt32Result())
1999            return true;
2000
2001        if (isInt32Constant(node))
2002            return true;
2003
2004        return generationInfo(node).isJSInt32();
2005    }
2006
2007    bool betterUseStrictInt52(Node* node)
2008    {
2009        return !generationInfo(node).isInt52();
2010    }
2011    bool betterUseStrictInt52(Edge edge)
2012    {
2013        return betterUseStrictInt52(edge.node());
2014    }
2015
2016    bool compare(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_JITOperation_EJJ);
2017    bool compilePeepHoleBranch(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_JITOperation_EJJ);
2018    void compilePeepHoleInt32Branch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
2019    void compilePeepHoleInt52Branch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
2020    void compilePeepHoleBooleanBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
2021    void compilePeepHoleDoubleBranch(Node*, Node* branchNode, JITCompiler::DoubleCondition);
2022    void compilePeepHoleObjectEquality(Node*, Node* branchNode);
2023    void compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode);
2024    void compileObjectEquality(Node*);
2025    void compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild);
2026    void compileObjectOrOtherLogicalNot(Edge value);
2027    void compileLogicalNot(Node*);
2028    void compileStringEquality(
2029        Node*, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR,
2030        GPRReg leftTempGPR, GPRReg rightTempGPR, GPRReg leftTemp2GPR,
2031        GPRReg rightTemp2GPR, JITCompiler::JumpList fastTrue,
2032        JITCompiler::JumpList fastSlow);
2033    void compileStringEquality(Node*);
2034    void compileStringIdentEquality(Node*);
2035    void compileStringToUntypedEquality(Node*, Edge stringEdge, Edge untypedEdge);
2036    void compileStringIdentToNotStringVarEquality(Node*, Edge stringEdge, Edge notStringVarEdge);
2037    void compileStringZeroLength(Node*);
2038    void compileMiscStrictEq(Node*);
2039
2040    void emitObjectOrOtherBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
2041    void emitBranch(Node*);
2042
2043    struct StringSwitchCase {
2044        StringSwitchCase() { }
2045
2046        StringSwitchCase(StringImpl* string, BasicBlock* target)
2047            : string(string)
2048            , target(target)
2049        {
2050        }
2051
2052        bool operator<(const StringSwitchCase& other) const;
2053
2054        StringImpl* string;
2055        BasicBlock* target;
2056    };
2057
2058    void emitSwitchIntJump(SwitchData*, GPRReg value, GPRReg scratch);
2059    void emitSwitchImm(Node*, SwitchData*);
2060    void emitSwitchCharStringJump(SwitchData*, GPRReg value, GPRReg scratch);
2061    void emitSwitchChar(Node*, SwitchData*);
2062    void emitBinarySwitchStringRecurse(
2063        SwitchData*, const Vector<StringSwitchCase>&, unsigned numChecked,
2064        unsigned begin, unsigned end, GPRReg buffer, GPRReg length, GPRReg temp,
2065        unsigned alreadyCheckedLength, bool checkedExactLength);
2066    void emitSwitchStringOnString(SwitchData*, GPRReg string);
2067    void emitSwitchString(Node*, SwitchData*);
2068    void emitSwitch(Node*);
2069
2070    void compileToStringOnCell(Node*);
2071    void compileNewStringObject(Node*);
2072
2073    void compileNewTypedArray(Node*);
2074
2075    void compileInt32Compare(Node*, MacroAssembler::RelationalCondition);
2076    void compileInt52Compare(Node*, MacroAssembler::RelationalCondition);
2077    void compileBooleanCompare(Node*, MacroAssembler::RelationalCondition);
2078    void compileDoubleCompare(Node*, MacroAssembler::DoubleCondition);
2079
2080    bool compileStrictEq(Node*);
2081
2082    void compileAllocatePropertyStorage(Node*);
2083    void compileReallocatePropertyStorage(Node*);
2084
2085#if USE(JSVALUE32_64)
2086    template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType>
2087    void compileContiguousPutByVal(Node*, BaseOperandType&, PropertyOperandType&, ValueOperandType&, GPRReg valuePayloadReg, TagType valueTag);
2088#endif
2089    void compileDoublePutByVal(Node*, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property);
2090    bool putByValWillNeedExtraRegister(ArrayMode arrayMode)
2091    {
2092        return arrayMode.mayStoreToHole();
2093    }
2094    GPRReg temporaryRegisterForPutByVal(GPRTemporary&, ArrayMode);
2095    GPRReg temporaryRegisterForPutByVal(GPRTemporary& temporary, Node* node)
2096    {
2097        return temporaryRegisterForPutByVal(temporary, node->arrayMode());
2098    }
2099
2100    void compileGetCharCodeAt(Node*);
2101    void compileGetByValOnString(Node*);
2102    void compileFromCharCode(Node*);
2103
2104    void compileGetByValOnArguments(Node*);
2105    void compileGetArgumentsLength(Node*);
2106
2107    void compileGetArrayLength(Node*);
2108
2109    void compileValueRep(Node*);
2110    void compileDoubleRep(Node*);
2111
2112    void compileValueToInt32(Node*);
2113    void compileUInt32ToNumber(Node*);
2114    void compileDoubleAsInt32(Node*);
2115    void compileAdd(Node*);
2116    void compileMakeRope(Node*);
2117    void compileArithSub(Node*);
2118    void compileArithNegate(Node*);
2119    void compileArithMul(Node*);
2120    void compileArithDiv(Node*);
2121    void compileArithMod(Node*);
2122    void compileConstantStoragePointer(Node*);
2123    void compileGetIndexedPropertyStorage(Node*);
2124    JITCompiler::Jump jumpForTypedArrayOutOfBounds(Node*, GPRReg baseGPR, GPRReg indexGPR);
2125    void emitTypedArrayBoundsCheck(Node*, GPRReg baseGPR, GPRReg indexGPR);
2126    void compileGetTypedArrayByteOffset(Node*);
2127    void compileGetByValOnIntTypedArray(Node*, TypedArrayType);
2128    void compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
2129    void compileGetByValOnFloatTypedArray(Node*, TypedArrayType);
2130    void compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
2131    void compileNewFunctionNoCheck(Node*);
2132    void compileNewFunctionExpression(Node*);
2133    bool compileRegExpExec(Node*);
2134
2135    JITCompiler::Jump branchIsCell(JSValueRegs);
2136    JITCompiler::Jump branchNotCell(JSValueRegs);
2137    JITCompiler::Jump branchIsOther(JSValueRegs, GPRReg tempGPR);
2138    JITCompiler::Jump branchNotOther(JSValueRegs, GPRReg tempGPR);
2139
2140    void moveTrueTo(GPRReg);
2141    void moveFalseTo(GPRReg);
2142    void blessBoolean(GPRReg);
2143
2144    // size can be an immediate or a register, and must be in bytes. If size is a register,
2145    // it must be a different register than resultGPR. Emits code that place a pointer to
2146    // the end of the allocation. The returned jump is the jump to the slow path.
2147    template<typename SizeType>
2148    MacroAssembler::Jump emitAllocateBasicStorage(SizeType size, GPRReg resultGPR)
2149    {
2150        CopiedAllocator* copiedAllocator = &m_jit.vm()->heap.storageAllocator();
2151
2152        // It's invalid to allocate zero bytes in CopiedSpace.
2153#ifndef NDEBUG
2154        m_jit.move(size, resultGPR);
2155        MacroAssembler::Jump nonZeroSize = m_jit.branchTest32(MacroAssembler::NonZero, resultGPR);
2156        m_jit.abortWithReason(DFGBasicStorageAllocatorZeroSize);
2157        nonZeroSize.link(&m_jit);
2158#endif
2159
2160        m_jit.loadPtr(&copiedAllocator->m_currentRemaining, resultGPR);
2161        MacroAssembler::Jump slowPath = m_jit.branchSubPtr(JITCompiler::Signed, size, resultGPR);
2162        m_jit.storePtr(resultGPR, &copiedAllocator->m_currentRemaining);
2163        m_jit.negPtr(resultGPR);
2164        m_jit.addPtr(JITCompiler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), resultGPR);
2165
2166        return slowPath;
2167    }
2168
2169    // Allocator for a cell of a specific size.
2170    template <typename StructureType> // StructureType can be GPR or ImmPtr.
2171    void emitAllocateJSCell(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure,
2172        GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
2173    {
2174        m_jit.loadPtr(MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()), resultGPR);
2175        slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));
2176
2177        // The object is half-allocated: we have what we know is a fresh object, but
2178        // it's still on the GC's free list.
2179        m_jit.loadPtr(MacroAssembler::Address(resultGPR), scratchGPR);
2180        m_jit.storePtr(scratchGPR, MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()));
2181
2182        // Initialize the object's Structure.
2183        m_jit.emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR);
2184    }
2185
2186    // Allocator for an object of a specific size.
2187    template <typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
2188    void emitAllocateJSObject(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure,
2189        StorageType storage, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
2190    {
2191        emitAllocateJSCell(resultGPR, allocatorGPR, structure, scratchGPR, slowPath);
2192
2193        // Initialize the object's property storage pointer.
2194        m_jit.storePtr(storage, MacroAssembler::Address(resultGPR, JSObject::butterflyOffset()));
2195    }
2196
2197    // Convenience allocator for a built-in object.
2198    template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
2199    void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage,
2200        GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
2201    {
2202        MarkedAllocator* allocator = 0;
2203        size_t size = ClassType::allocationSize(0);
2204        if (ClassType::needsDestruction && ClassType::hasImmortalStructure)
2205            allocator = &m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(size);
2206        else if (ClassType::needsDestruction)
2207            allocator = &m_jit.vm()->heap.allocatorForObjectWithNormalDestructor(size);
2208        else
2209            allocator = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(size);
2210        m_jit.move(TrustedImmPtr(allocator), scratchGPR1);
2211        emitAllocateJSObject(resultGPR, scratchGPR1, structure, storage, scratchGPR2, slowPath);
2212    }
2213
2214    template <typename T>
2215    void emitAllocateDestructibleObject(GPRReg resultGPR, Structure* structure,
2216        GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
2217    {
2218        emitAllocateJSObject<T>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
2219        m_jit.storePtr(TrustedImmPtr(structure->classInfo()), MacroAssembler::Address(resultGPR, JSDestructibleObject::classInfoOffset()));
2220    }
2221
2222    void emitAllocateJSArray(GPRReg resultGPR, Structure*, GPRReg storageGPR, unsigned numElements);
2223    void emitAllocateArguments(GPRReg resultGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath);
2224
2225    // Add a speculation check.
2226    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail);
2227    void speculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail);
2228
2229    // Add a speculation check without additional recovery, and with a promise to supply a jump later.
2230    OSRExitJumpPlaceholder speculationCheck(ExitKind, JSValueSource, Node*);
2231    OSRExitJumpPlaceholder speculationCheck(ExitKind, JSValueSource, Edge);
2232    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail);
2233    void speculationCheck(ExitKind, JSValueSource, Edge, const MacroAssembler::JumpList& jumpsToFail);
2234    // Add a speculation check with additional recovery.
2235    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
2236    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
2237
2238    void emitInvalidationPoint(Node*);
2239
2240    // Called when we statically determine that a speculation will fail.
2241    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Node*);
2242    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Edge);
2243
2244    // Helpers for performing type checks on an edge stored in the given registers.
2245    bool needsTypeCheck(Edge edge, SpeculatedType typesPassedThrough) { return m_interpreter.needsTypeCheck(edge, typesPassedThrough); }
2246    void typeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail);
2247
2248    void speculateInt32(Edge);
2249#if USE(JSVALUE64)
2250    void convertMachineInt(Edge, GPRReg resultGPR);
2251    void speculateMachineInt(Edge);
2252    void speculateDoubleRepMachineInt(Edge);
2253#endif // USE(JSVALUE64)
2254    void speculateNumber(Edge);
2255    void speculateDoubleReal(Edge);
2256    void speculateBoolean(Edge);
2257    void speculateCell(Edge);
2258    void speculateObject(Edge);
2259    void speculateFinalObject(Edge);
2260    void speculateObjectOrOther(Edge);
2261    void speculateString(Edge edge, GPRReg cell);
2262    void speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage);
2263    void speculateStringIdent(Edge edge, GPRReg string);
2264    void speculateStringIdent(Edge);
2265    void speculateString(Edge);
2266    void speculateNotStringVar(Edge);
2267    template<typename StructureLocationType>
2268    void speculateStringObjectForStructure(Edge, StructureLocationType);
2269    void speculateStringObject(Edge, GPRReg);
2270    void speculateStringObject(Edge);
2271    void speculateStringOrStringObject(Edge);
2272    void speculateNotCell(Edge);
2273    void speculateOther(Edge);
2274    void speculateMisc(Edge, JSValueRegs);
2275    void speculateMisc(Edge);
2276    void speculate(Node*, Edge);
2277
2278    JITCompiler::Jump jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode, IndexingType);
2279    JITCompiler::JumpList jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode);
2280    void checkArray(Node*);
2281    void arrayify(Node*, GPRReg baseReg, GPRReg propertyReg);
2282    void arrayify(Node*);
2283
2284    template<bool strict>
2285    GPRReg fillSpeculateInt32Internal(Edge, DataFormat& returnFormat);
2286
2287    // It is possible, during speculative generation, to reach a situation in which we
2288    // can statically determine a speculation will fail (for example, when two nodes
2289    // will make conflicting speculations about the same operand). In such cases this
2290    // flag is cleared, indicating no further code generation should take place.
2291    bool m_compileOkay;
2292
2293    void recordSetLocal(
2294        VirtualRegister bytecodeReg, VirtualRegister machineReg, DataFormat format)
2295    {
2296        m_stream->appendAndLog(VariableEvent::setLocal(bytecodeReg, machineReg, format));
2297    }
2298
2299    void recordSetLocal(DataFormat format)
2300    {
2301        VariableAccessData* variable = m_currentNode->variableAccessData();
2302        recordSetLocal(variable->local(), variable->machineLocal(), format);
2303    }
2304
2305    GenerationInfo& generationInfoFromVirtualRegister(VirtualRegister virtualRegister)
2306    {
2307        return m_generationInfo[virtualRegister.toLocal()];
2308    }
2309
2310    GenerationInfo& generationInfo(Node* node)
2311    {
2312        return generationInfoFromVirtualRegister(node->virtualRegister());
2313    }
2314
2315    GenerationInfo& generationInfo(Edge edge)
2316    {
2317        return generationInfo(edge.node());
2318    }
2319
2320    // The JIT, while also provides MacroAssembler functionality.
2321    JITCompiler& m_jit;
2322
2323    // The current node being generated.
2324    BasicBlock* m_block;
2325    Node* m_currentNode;
2326    NodeType m_lastGeneratedNode;
2327    bool m_canExit;
2328    unsigned m_indexInBlock;
2329    // Virtual and physical register maps.
2330    Vector<GenerationInfo, 32> m_generationInfo;
2331    RegisterBank<GPRInfo> m_gprs;
2332    RegisterBank<FPRInfo> m_fprs;
2333
2334    Vector<MacroAssembler::Label> m_osrEntryHeads;
2335
2336    struct BranchRecord {
2337        BranchRecord(MacroAssembler::Jump jump, BasicBlock* destination)
2338            : jump(jump)
2339            , destination(destination)
2340        {
2341        }
2342
2343        MacroAssembler::Jump jump;
2344        BasicBlock* destination;
2345    };
2346    Vector<BranchRecord, 8> m_branches;
2347
2348    CodeOrigin m_codeOriginForExitTarget;
2349    CodeOrigin m_codeOriginForExitProfile;
2350
2351    InPlaceAbstractState m_state;
2352    AbstractInterpreter<InPlaceAbstractState> m_interpreter;
2353
2354    VariableEventStream* m_stream;
2355    MinifiedGraph* m_minifiedGraph;
2356
2357    bool m_isCheckingArgumentTypes;
2358
2359    Vector<OwnPtr<SlowPathGenerator>, 8> m_slowPathGenerators;
2360    Vector<SilentRegisterSavePlan> m_plans;
2361};
2362
2363
2364// === Operand types ===
2365//
2366// These classes are used to lock the operands to a node into machine
2367// registers. These classes implement of pattern of locking a value
2368// into register at the point of construction only if it is already in
2369// registers, and otherwise loading it lazily at the point it is first
2370// used. We do so in order to attempt to avoid spilling one operand
2371// in order to make space available for another.
2372
2373class JSValueOperand {
2374public:
2375    explicit JSValueOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
2376        : m_jit(jit)
2377        , m_edge(edge)
2378#if USE(JSVALUE64)
2379        , m_gprOrInvalid(InvalidGPRReg)
2380#elif USE(JSVALUE32_64)
2381        , m_isDouble(false)
2382#endif
2383    {
2384        ASSERT(m_jit);
2385        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
2386#if USE(JSVALUE64)
2387        if (jit->isFilled(node()))
2388            gpr();
2389#elif USE(JSVALUE32_64)
2390        m_register.pair.tagGPR = InvalidGPRReg;
2391        m_register.pair.payloadGPR = InvalidGPRReg;
2392        if (jit->isFilled(node()))
2393            fill();
2394#endif
2395    }
2396
2397    ~JSValueOperand()
2398    {
2399#if USE(JSVALUE64)
2400        ASSERT(m_gprOrInvalid != InvalidGPRReg);
2401        m_jit->unlock(m_gprOrInvalid);
2402#elif USE(JSVALUE32_64)
2403        if (m_isDouble) {
2404            ASSERT(m_register.fpr != InvalidFPRReg);
2405            m_jit->unlock(m_register.fpr);
2406        } else {
2407            ASSERT(m_register.pair.tagGPR != InvalidGPRReg && m_register.pair.payloadGPR != InvalidGPRReg);
2408            m_jit->unlock(m_register.pair.tagGPR);
2409            m_jit->unlock(m_register.pair.payloadGPR);
2410        }
2411#endif
2412    }
2413
2414    Edge edge() const
2415    {
2416        return m_edge;
2417    }
2418
2419    Node* node() const
2420    {
2421        return edge().node();
2422    }
2423
2424#if USE(JSVALUE64)
2425    GPRReg gpr()
2426    {
2427        if (m_gprOrInvalid == InvalidGPRReg)
2428            m_gprOrInvalid = m_jit->fillJSValue(m_edge);
2429        return m_gprOrInvalid;
2430    }
2431    JSValueRegs jsValueRegs()
2432    {
2433        return JSValueRegs(gpr());
2434    }
2435#elif USE(JSVALUE32_64)
2436    bool isDouble() { return m_isDouble; }
2437
2438    void fill()
2439    {
2440        if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg)
2441            m_isDouble = !m_jit->fillJSValue(m_edge, m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
2442    }
2443
2444    GPRReg tagGPR()
2445    {
2446        fill();
2447        ASSERT(!m_isDouble);
2448        return m_register.pair.tagGPR;
2449    }
2450
2451    GPRReg payloadGPR()
2452    {
2453        fill();
2454        ASSERT(!m_isDouble);
2455        return m_register.pair.payloadGPR;
2456    }
2457
2458    JSValueRegs jsValueRegs()
2459    {
2460        return JSValueRegs(tagGPR(), payloadGPR());
2461    }
2462
2463    GPRReg gpr(WhichValueWord which)
2464    {
2465        return jsValueRegs().gpr(which);
2466    }
2467
2468    FPRReg fpr()
2469    {
2470        fill();
2471        ASSERT(m_isDouble);
2472        return m_register.fpr;
2473    }
2474#endif
2475
2476    void use()
2477    {
2478        m_jit->use(node());
2479    }
2480
2481private:
2482    SpeculativeJIT* m_jit;
2483    Edge m_edge;
2484#if USE(JSVALUE64)
2485    GPRReg m_gprOrInvalid;
2486#elif USE(JSVALUE32_64)
2487    union {
2488        struct {
2489            GPRReg tagGPR;
2490            GPRReg payloadGPR;
2491        } pair;
2492        FPRReg fpr;
2493    } m_register;
2494    bool m_isDouble;
2495#endif
2496};
2497
2498class StorageOperand {
2499public:
2500    explicit StorageOperand(SpeculativeJIT* jit, Edge edge)
2501        : m_jit(jit)
2502        , m_edge(edge)
2503        , m_gprOrInvalid(InvalidGPRReg)
2504    {
2505        ASSERT(m_jit);
2506        ASSERT(edge.useKind() == UntypedUse || edge.useKind() == KnownCellUse);
2507        if (jit->isFilled(node()))
2508            gpr();
2509    }
2510
2511    ~StorageOperand()
2512    {
2513        ASSERT(m_gprOrInvalid != InvalidGPRReg);
2514        m_jit->unlock(m_gprOrInvalid);
2515    }
2516
2517    Edge edge() const
2518    {
2519        return m_edge;
2520    }
2521
2522    Node* node() const
2523    {
2524        return edge().node();
2525    }
2526
2527    GPRReg gpr()
2528    {
2529        if (m_gprOrInvalid == InvalidGPRReg)
2530            m_gprOrInvalid = m_jit->fillStorage(edge());
2531        return m_gprOrInvalid;
2532    }
2533
2534    void use()
2535    {
2536        m_jit->use(node());
2537    }
2538
2539private:
2540    SpeculativeJIT* m_jit;
2541    Edge m_edge;
2542    GPRReg m_gprOrInvalid;
2543};
2544
2545
2546// === Temporaries ===
2547//
2548// These classes are used to allocate temporary registers.
2549// A mechanism is provided to attempt to reuse the registers
2550// currently allocated to child nodes whose value is consumed
2551// by, and not live after, this operation.
2552
2553enum ReuseTag { Reuse };
2554
2555class GPRTemporary {
2556public:
2557    GPRTemporary();
2558    GPRTemporary(SpeculativeJIT*);
2559    GPRTemporary(SpeculativeJIT*, GPRReg specific);
2560    template<typename T>
2561    GPRTemporary(SpeculativeJIT* jit, ReuseTag, T& operand)
2562        : m_jit(jit)
2563        , m_gpr(InvalidGPRReg)
2564    {
2565        if (m_jit->canReuse(operand.node()))
2566            m_gpr = m_jit->reuse(operand.gpr());
2567        else
2568            m_gpr = m_jit->allocate();
2569    }
2570    template<typename T1, typename T2>
2571    GPRTemporary(SpeculativeJIT* jit, ReuseTag, T1& op1, T2& op2)
2572        : m_jit(jit)
2573        , m_gpr(InvalidGPRReg)
2574    {
2575        if (m_jit->canReuse(op1.node()))
2576            m_gpr = m_jit->reuse(op1.gpr());
2577        else if (m_jit->canReuse(op2.node()))
2578            m_gpr = m_jit->reuse(op2.gpr());
2579        else
2580            m_gpr = m_jit->allocate();
2581    }
2582#if USE(JSVALUE32_64)
2583    GPRTemporary(SpeculativeJIT*, ReuseTag, JSValueOperand&, WhichValueWord);
2584#endif
2585
2586    void adopt(GPRTemporary&);
2587
2588    ~GPRTemporary()
2589    {
2590        if (m_jit && m_gpr != InvalidGPRReg)
2591            m_jit->unlock(gpr());
2592    }
2593
2594    GPRReg gpr()
2595    {
2596        return m_gpr;
2597    }
2598
2599private:
2600    SpeculativeJIT* m_jit;
2601    GPRReg m_gpr;
2602};
2603
2604class JSValueRegsTemporary {
2605public:
2606    JSValueRegsTemporary();
2607    JSValueRegsTemporary(SpeculativeJIT*);
2608    ~JSValueRegsTemporary();
2609
2610    JSValueRegs regs();
2611
2612private:
2613#if USE(JSVALUE64)
2614    GPRTemporary m_gpr;
2615#else
2616    GPRTemporary m_payloadGPR;
2617    GPRTemporary m_tagGPR;
2618#endif
2619};
2620
2621class FPRTemporary {
2622public:
2623    FPRTemporary(SpeculativeJIT*);
2624    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&);
2625    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
2626#if USE(JSVALUE32_64)
2627    FPRTemporary(SpeculativeJIT*, JSValueOperand&);
2628#endif
2629
2630    ~FPRTemporary()
2631    {
2632        m_jit->unlock(fpr());
2633    }
2634
2635    FPRReg fpr() const
2636    {
2637        ASSERT(m_fpr != InvalidFPRReg);
2638        return m_fpr;
2639    }
2640
2641protected:
2642    FPRTemporary(SpeculativeJIT* jit, FPRReg lockedFPR)
2643        : m_jit(jit)
2644        , m_fpr(lockedFPR)
2645    {
2646    }
2647
2648private:
2649    SpeculativeJIT* m_jit;
2650    FPRReg m_fpr;
2651};
2652
2653
2654// === Results ===
2655//
2656// These classes lock the result of a call to a C++ helper function.
2657
2658class GPRResult : public GPRTemporary {
2659public:
2660    GPRResult(SpeculativeJIT* jit)
2661        : GPRTemporary(jit, GPRInfo::returnValueGPR)
2662    {
2663    }
2664};
2665
2666#if USE(JSVALUE32_64)
2667class GPRResult2 : public GPRTemporary {
2668public:
2669    GPRResult2(SpeculativeJIT* jit)
2670        : GPRTemporary(jit, GPRInfo::returnValueGPR2)
2671    {
2672    }
2673};
2674#endif
2675
2676class FPRResult : public FPRTemporary {
2677public:
2678    FPRResult(SpeculativeJIT* jit)
2679        : FPRTemporary(jit, lockedResult(jit))
2680    {
2681    }
2682
2683private:
2684    static FPRReg lockedResult(SpeculativeJIT* jit)
2685    {
2686        jit->lock(FPRInfo::returnValueFPR);
2687        return FPRInfo::returnValueFPR;
2688    }
2689};
2690
2691
2692// === Speculative Operand types ===
2693//
2694// SpeculateInt32Operand, SpeculateStrictInt32Operand and SpeculateCellOperand.
2695//
2696// These are used to lock the operands to a node into machine registers within the
2697// SpeculativeJIT. The classes operate like those above, however these will
2698// perform a speculative check for a more restrictive type than we can statically
2699// determine the operand to have. If the operand does not have the requested type,
2700// a bail-out to the non-speculative path will be taken.
2701
2702class SpeculateInt32Operand {
2703public:
2704    explicit SpeculateInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
2705        : m_jit(jit)
2706        , m_edge(edge)
2707        , m_gprOrInvalid(InvalidGPRReg)
2708#ifndef NDEBUG
2709        , m_format(DataFormatNone)
2710#endif
2711    {
2712        ASSERT(m_jit);
2713        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
2714        if (jit->isFilled(node()))
2715            gpr();
2716    }
2717
2718    ~SpeculateInt32Operand()
2719    {
2720        ASSERT(m_gprOrInvalid != InvalidGPRReg);
2721        m_jit->unlock(m_gprOrInvalid);
2722    }
2723
2724    Edge edge() const
2725    {
2726        return m_edge;
2727    }
2728
2729    Node* node() const
2730    {
2731        return edge().node();
2732    }
2733
2734    DataFormat format()
2735    {
2736        gpr(); // m_format is set when m_gpr is locked.
2737        ASSERT(m_format == DataFormatInt32 || m_format == DataFormatJSInt32);
2738        return m_format;
2739    }
2740
2741    GPRReg gpr()
2742    {
2743        if (m_gprOrInvalid == InvalidGPRReg)
2744            m_gprOrInvalid = m_jit->fillSpeculateInt32(edge(), m_format);
2745        return m_gprOrInvalid;
2746    }
2747
2748    void use()
2749    {
2750        m_jit->use(node());
2751    }
2752
2753private:
2754    SpeculativeJIT* m_jit;
2755    Edge m_edge;
2756    GPRReg m_gprOrInvalid;
2757    DataFormat m_format;
2758};
2759
2760class SpeculateStrictInt32Operand {
2761public:
2762    explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
2763        : m_jit(jit)
2764        , m_edge(edge)
2765        , m_gprOrInvalid(InvalidGPRReg)
2766    {
2767        ASSERT(m_jit);
2768        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
2769        if (jit->isFilled(node()))
2770            gpr();
2771    }
2772
2773    ~SpeculateStrictInt32Operand()
2774    {
2775        ASSERT(m_gprOrInvalid != InvalidGPRReg);
2776        m_jit->unlock(m_gprOrInvalid);
2777    }
2778
2779    Edge edge() const
2780    {
2781        return m_edge;
2782    }
2783
2784    Node* node() const
2785    {
2786        return edge().node();
2787    }
2788
2789    GPRReg gpr()
2790    {
2791        if (m_gprOrInvalid == InvalidGPRReg)
2792            m_gprOrInvalid = m_jit->fillSpeculateInt32Strict(edge());
2793        return m_gprOrInvalid;
2794    }
2795
2796    void use()
2797    {
2798        m_jit->use(node());
2799    }
2800
2801private:
2802    SpeculativeJIT* m_jit;
2803    Edge m_edge;
2804    GPRReg m_gprOrInvalid;
2805};
2806
2807// Gives you a canonical Int52 (i.e. it's left-shifted by 16, low bits zero).
2808class SpeculateInt52Operand {
2809public:
2810    explicit SpeculateInt52Operand(SpeculativeJIT* jit, Edge edge)
2811        : m_jit(jit)
2812        , m_edge(edge)
2813        , m_gprOrInvalid(InvalidGPRReg)
2814    {
2815        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
2816        if (jit->isFilled(node()))
2817            gpr();
2818    }
2819
2820    ~SpeculateInt52Operand()
2821    {
2822        ASSERT(m_gprOrInvalid != InvalidGPRReg);
2823        m_jit->unlock(m_gprOrInvalid);
2824    }
2825
2826    Edge edge() const
2827    {
2828        return m_edge;
2829    }
2830
2831    Node* node() const
2832    {
2833        return edge().node();
2834    }
2835
2836    GPRReg gpr()
2837    {
2838        if (m_gprOrInvalid == InvalidGPRReg)
2839            m_gprOrInvalid = m_jit->fillSpeculateInt52(edge(), DataFormatInt52);
2840        return m_gprOrInvalid;
2841    }
2842
2843    void use()
2844    {
2845        m_jit->use(node());
2846    }
2847
2848private:
2849    SpeculativeJIT* m_jit;
2850    Edge m_edge;
2851    GPRReg m_gprOrInvalid;
2852};
2853
2854// Gives you a strict Int52 (i.e. the payload is in the low 48 bits, high 16 bits are sign-extended).
2855class SpeculateStrictInt52Operand {
2856public:
2857    explicit SpeculateStrictInt52Operand(SpeculativeJIT* jit, Edge edge)
2858        : m_jit(jit)
2859        , m_edge(edge)
2860        , m_gprOrInvalid(InvalidGPRReg)
2861    {
2862        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
2863        if (jit->isFilled(node()))
2864            gpr();
2865    }
2866
2867    ~SpeculateStrictInt52Operand()
2868    {
2869        ASSERT(m_gprOrInvalid != InvalidGPRReg);
2870        m_jit->unlock(m_gprOrInvalid);
2871    }
2872
2873    Edge edge() const
2874    {
2875        return m_edge;
2876    }
2877
2878    Node* node() const
2879    {
2880        return edge().node();
2881    }
2882
2883    GPRReg gpr()
2884    {
2885        if (m_gprOrInvalid == InvalidGPRReg)
2886            m_gprOrInvalid = m_jit->fillSpeculateInt52(edge(), DataFormatStrictInt52);
2887        return m_gprOrInvalid;
2888    }
2889
2890    void use()
2891    {
2892        m_jit->use(node());
2893    }
2894
2895private:
2896    SpeculativeJIT* m_jit;
2897    Edge m_edge;
2898    GPRReg m_gprOrInvalid;
2899};
2900
2901enum OppositeShiftTag { OppositeShift };
2902
2903class SpeculateWhicheverInt52Operand {
2904public:
2905    explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge)
2906        : m_jit(jit)
2907        , m_edge(edge)
2908        , m_gprOrInvalid(InvalidGPRReg)
2909        , m_strict(jit->betterUseStrictInt52(edge))
2910    {
2911        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
2912        if (jit->isFilled(node()))
2913            gpr();
2914    }
2915
2916    explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, const SpeculateWhicheverInt52Operand& other)
2917        : m_jit(jit)
2918        , m_edge(edge)
2919        , m_gprOrInvalid(InvalidGPRReg)
2920        , m_strict(other.m_strict)
2921    {
2922        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
2923        if (jit->isFilled(node()))
2924            gpr();
2925    }
2926
2927    explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OppositeShiftTag, const SpeculateWhicheverInt52Operand& other)
2928        : m_jit(jit)
2929        , m_edge(edge)
2930        , m_gprOrInvalid(InvalidGPRReg)
2931        , m_strict(!other.m_strict)
2932    {
2933        RELEASE_ASSERT(edge.useKind() == Int52RepUse);
2934        if (jit->isFilled(node()))
2935            gpr();
2936    }
2937
2938    ~SpeculateWhicheverInt52Operand()
2939    {
2940        ASSERT(m_gprOrInvalid != InvalidGPRReg);
2941        m_jit->unlock(m_gprOrInvalid);
2942    }
2943
2944    Edge edge() const
2945    {
2946        return m_edge;
2947    }
2948
2949    Node* node() const
2950    {
2951        return edge().node();
2952    }
2953
2954    GPRReg gpr()
2955    {
2956        if (m_gprOrInvalid == InvalidGPRReg) {
2957            m_gprOrInvalid = m_jit->fillSpeculateInt52(
2958                edge(), m_strict ? DataFormatStrictInt52 : DataFormatInt52);
2959        }
2960        return m_gprOrInvalid;
2961    }
2962
2963    void use()
2964    {
2965        m_jit->use(node());
2966    }
2967
2968    DataFormat format() const
2969    {
2970        return m_strict ? DataFormatStrictInt52 : DataFormatInt52;
2971    }
2972
2973private:
2974    SpeculativeJIT* m_jit;
2975    Edge m_edge;
2976    GPRReg m_gprOrInvalid;
2977    bool m_strict;
2978};
2979
2980class SpeculateDoubleOperand {
2981public:
2982    explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge)
2983        : m_jit(jit)
2984        , m_edge(edge)
2985        , m_fprOrInvalid(InvalidFPRReg)
2986    {
2987        ASSERT(m_jit);
2988        RELEASE_ASSERT(isDouble(edge.useKind()));
2989        if (jit->isFilled(node()))
2990            fpr();
2991    }
2992
2993    ~SpeculateDoubleOperand()
2994    {
2995        ASSERT(m_fprOrInvalid != InvalidFPRReg);
2996        m_jit->unlock(m_fprOrInvalid);
2997    }
2998
2999    Edge edge() const
3000    {
3001        return m_edge;
3002    }
3003
3004    Node* node() const
3005    {
3006        return edge().node();
3007    }
3008
3009    FPRReg fpr()
3010    {
3011        if (m_fprOrInvalid == InvalidFPRReg)
3012            m_fprOrInvalid = m_jit->fillSpeculateDouble(edge());
3013        return m_fprOrInvalid;
3014    }
3015
3016    void use()
3017    {
3018        m_jit->use(node());
3019    }
3020
3021private:
3022    SpeculativeJIT* m_jit;
3023    Edge m_edge;
3024    FPRReg m_fprOrInvalid;
3025};
3026
3027class SpeculateCellOperand {
3028public:
3029    explicit SpeculateCellOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
3030        : m_jit(jit)
3031        , m_edge(edge)
3032        , m_gprOrInvalid(InvalidGPRReg)
3033    {
3034        ASSERT(m_jit);
3035        if (!edge)
3036            return;
3037        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || isCell(edge.useKind()));
3038        if (jit->isFilled(node()))
3039            gpr();
3040    }
3041
3042    ~SpeculateCellOperand()
3043    {
3044        if (!m_edge)
3045            return;
3046        ASSERT(m_gprOrInvalid != InvalidGPRReg);
3047        m_jit->unlock(m_gprOrInvalid);
3048    }
3049
3050    Edge edge() const
3051    {
3052        return m_edge;
3053    }
3054
3055    Node* node() const
3056    {
3057        return edge().node();
3058    }
3059
3060    GPRReg gpr()
3061    {
3062        ASSERT(m_edge);
3063        if (m_gprOrInvalid == InvalidGPRReg)
3064            m_gprOrInvalid = m_jit->fillSpeculateCell(edge());
3065        return m_gprOrInvalid;
3066    }
3067
3068    void use()
3069    {
3070        ASSERT(m_edge);
3071        m_jit->use(node());
3072    }
3073
3074private:
3075    SpeculativeJIT* m_jit;
3076    Edge m_edge;
3077    GPRReg m_gprOrInvalid;
3078};
3079
3080class SpeculateBooleanOperand {
3081public:
3082    explicit SpeculateBooleanOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
3083        : m_jit(jit)
3084        , m_edge(edge)
3085        , m_gprOrInvalid(InvalidGPRReg)
3086    {
3087        ASSERT(m_jit);
3088        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse);
3089        if (jit->isFilled(node()))
3090            gpr();
3091    }
3092
3093    ~SpeculateBooleanOperand()
3094    {
3095        ASSERT(m_gprOrInvalid != InvalidGPRReg);
3096        m_jit->unlock(m_gprOrInvalid);
3097    }
3098
3099    Edge edge() const
3100    {
3101        return m_edge;
3102    }
3103
3104    Node* node() const
3105    {
3106        return edge().node();
3107    }
3108
3109    GPRReg gpr()
3110    {
3111        if (m_gprOrInvalid == InvalidGPRReg)
3112            m_gprOrInvalid = m_jit->fillSpeculateBoolean(edge());
3113        return m_gprOrInvalid;
3114    }
3115
3116    void use()
3117    {
3118        m_jit->use(node());
3119    }
3120
3121private:
3122    SpeculativeJIT* m_jit;
3123    Edge m_edge;
3124    GPRReg m_gprOrInvalid;
3125};
3126
3127template<typename StructureLocationType>
3128void SpeculativeJIT::speculateStringObjectForStructure(Edge edge, StructureLocationType structureLocation)
3129{
3130    Structure* stringObjectStructure =
3131        m_jit.globalObjectFor(m_currentNode->origin.semantic)->stringObjectStructure();
3132
3133    if (!m_state.forNode(edge).m_currentKnownStructure.isSubsetOf(StructureSet(stringObjectStructure))) {
3134        speculationCheck(
3135            NotStringObject, JSValueRegs(), 0,
3136            m_jit.branchStructurePtr(
3137                JITCompiler::NotEqual, structureLocation, stringObjectStructure));
3138    }
3139}
3140
3141#define DFG_TYPE_CHECK(source, edge, typesPassedThrough, jumpToFail) do { \
3142        JSValueSource _dtc_source = (source);                           \
3143        Edge _dtc_edge = (edge);                                        \
3144        SpeculatedType _dtc_typesPassedThrough = typesPassedThrough;    \
3145        if (!needsTypeCheck(_dtc_edge, _dtc_typesPassedThrough))        \
3146            break;                                                      \
3147        typeCheck(_dtc_source, _dtc_edge, _dtc_typesPassedThrough, (jumpToFail)); \
3148    } while (0)
3149
3150} } // namespace JSC::DFG
3151
3152#endif
3153#endif
3154
3155