/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompiler.h"

#if ENABLE(DFG_JIT) && USE(JSVALUE32_64)

#include "DFGOperations.h"
#include "Operations.h"
#include <wtf/DataLog.h>

namespace JSC { namespace DFG {

void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
    // 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("OSR exit (");
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        dataLogF("bc#%u", codeOrigin.bytecodeIndex);
        if (!codeOrigin.inlineCallFrame)
            break;
        dataLogF(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
    }
    dataLogF(") at JIT offset 0x%x  ", m_jit.debugOffset());
    dumpOperands(operands, WTF::dataFile());
#endif

    if (Options::printEachOSRExit()) {
        SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
        debugInfo->codeBlock = m_jit.codeBlock();

        m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
    }

#if DFG_ENABLE(JIT_BREAK_ON_SPECULATION_FAILURE)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    m_jit.emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

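    // For example, a SpeculativeAdd has already clobbered its destination register
    // with src + dest by the time the check fails, so we subtract src back out to
    // recover the original value before the baseline JIT re-executes the add.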
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            m_jit.sub32(recovery->src(), recovery->dest());
            break;

        case BooleanSpeculationCheck:
            break;

        default:
            break;
        }
    }

    // 3) Refine some value profile, if appropriate.

    if (!!exit.m_jsValueSource) {
        if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
            // If the instruction that this originated from has an array profile, then
            // refine it. If it doesn't, then do nothing. The latter could happen for
            // hoisted checks, or checks emitted for operations that didn't have array
            // profiling - either ops that aren't array accesses at all, or weren't
            // known to be array accesses in the bytecode. The latter case is a FIXME
            // while the former case is an outcome of a CheckStructure not knowing why
            // it was emitted (could be either due to an inline cache of a property
            // access, or due to an array profile).

            // Note: We are free to assume that the jsValueSource is already known to
            // be a cell since both BadCache and BadIndexingType exits occur after
            // the cell check would have already happened.

            CodeOrigin codeOrigin = exit.m_codeOriginForExitProfile;
            if (ArrayProfile* arrayProfile = m_jit.baselineCodeBlockFor(codeOrigin)->getArrayProfile(codeOrigin.bytecodeIndex)) {
                GPRReg usedRegister1;
                GPRReg usedRegister2;
                if (exit.m_jsValueSource.isAddress()) {
                    usedRegister1 = exit.m_jsValueSource.base();
                    usedRegister2 = InvalidGPRReg;
                } else {
                    usedRegister1 = exit.m_jsValueSource.payloadGPR();
                    if (exit.m_jsValueSource.hasKnownTag())
                        usedRegister2 = InvalidGPRReg;
                    else
                        usedRegister2 = exit.m_jsValueSource.tagGPR();
                }

                GPRReg scratch1;
                GPRReg scratch2;
                scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);

                m_jit.push(scratch1);
                m_jit.push(scratch2);

                GPRReg value;
                if (exit.m_jsValueSource.isAddress()) {
                    value = scratch1;
                    m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), value);
                } else
                    value = exit.m_jsValueSource.payloadGPR();

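                // Record the structure we just observed in the profile, and merge the
                // structure's indexing type into the profile's ArrayModes bitfield,
                // i.e. arrayModes |= (1 << indexingType), much as the baseline JIT's
                // array profiling would have done.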
                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
                m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                m_jit.lshift32(scratch1, scratch2);
                m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));

                m_jit.pop(scratch2);
                m_jit.pop(scratch1);
            }
        }

        if (!!exit.m_valueProfile) {
            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);

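            // On JSVALUE32_64 a JSValue is a 32-bit tag plus a 32-bit payload, so the
            // profile bucket is filled in one half at a time, depending on where the
            // value currently lives and whether its tag is statically known.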
            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());

                m_jit.push(scratch);

                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);

                m_jit.pop(scratch);
            } else if (exit.m_jsValueSource.hasKnownTag()) {
                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            } else {
                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
            }
        }
    }

    // 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32InJSStack = false;
    bool haveUnboxedCellInJSStack = false;
    bool haveUnboxedBooleanInJSStack = false;
    bool haveUInt32s = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;
    bool haveArguments = false;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        switch (recovery.technique()) {
        case DisplacedInJSStack:
        case Int32DisplacedInJSStack:
        case CellDisplacedInJSStack:
        case BooleanDisplacedInJSStack:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)operands.numberOfLocals()) {
                switch (operands.local(recovery.virtualRegister()).technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case UnboxedBooleanInGPR:
                case UInt32InGPR:
                case InPair:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UInt32InGPR:
            haveUInt32s = true;
            break;

        case AlreadyInJSStackAsUnboxedInt32:
            haveUnboxedInt32InJSStack = true;
            break;

        case AlreadyInJSStackAsUnboxedCell:
            haveUnboxedCellInJSStack = true;
            break;

        case AlreadyInJSStackAsUnboxedBoolean:
            haveUnboxedBooleanInJSStack = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        case ArgumentsThatWereNotCreated:
            haveArguments = true;
            break;

        default:
            break;
        }
    }

    unsigned scratchBufferLengthBeforeUInt32s = numberOfPoisonedVirtualRegisters + ((numberOfDisplacedVirtualRegisters * 2) <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters);
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * (scratchBufferLengthBeforeUInt32s + (haveUInt32s ? 2 : 0)));
    EncodedJSValue* scratchDataBuffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
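    // The two extra slots requested when haveUInt32s is set are used by the
    // UInt32InGPR case below: one to spill a GPR that we borrow as an address
    // register, and one to spill fpRegT0 while we rebox the value as a double.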

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 5) Perform all reboxing of integers and cells, except for those in registers.

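    // These values already have the correct payload sitting in the stack slot;
    // reboxing them only requires writing the appropriate tag word next to it.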
    if (haveUnboxedInt32InJSStack || haveUnboxedCellInJSStack || haveUnboxedBooleanInJSStack) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            switch (recovery.technique()) {
            case AlreadyInJSStackAsUnboxedInt32:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case AlreadyInJSStackAsUnboxedCell:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            case AlreadyInJSStackAsUnboxedBoolean:
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index))));
                break;

            default:
                break;
            }
        }
    }

    // 6) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    initializePoisoned(operands.numberOfLocals());
    unsigned currentPoisonIndex = 0;

    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
        case UnboxedBooleanInGPR:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
        case InPair:
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else {
                m_jit.store32(recovery.tagGPR(), AssemblyHelpers::tagFor((VirtualRegister)operand));
                m_jit.store32(recovery.payloadGPR(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
            }
            break;
        case UInt32InGPR: {
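            // A uint32 with the high bit set cannot be represented as a boxed int32,
            // so it must be reboxed as a double: the register is converted as a signed
            // int32 and 2^32 is added to recover the unsigned value. We borrow a GPR
            // as an address register and fpRegT0 for the conversion, spilling both to
            // the scratch slots reserved above and restoring them afterwards.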
            EncodedJSValue* myScratch = scratchDataBuffer + scratchBufferLengthBeforeUInt32s;

            GPRReg addressGPR = GPRInfo::regT0;
            if (addressGPR == recovery.gpr())
                addressGPR = GPRInfo::regT1;

            m_jit.storePtr(addressGPR, myScratch);
            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);

            AssemblyHelpers::Jump positive = m_jit.branch32(AssemblyHelpers::GreaterThanOrEqual, recovery.gpr(), AssemblyHelpers::TrustedImm32(0));

            m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0);
            m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0);
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + currentPoisonIndex), addressGPR);
                m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR);
            } else
                m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor((VirtualRegister)operand));

            AssemblyHelpers::Jump done = m_jit.jump();

            positive.link(&m_jit);

            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
            } else {
                m_jit.store32(recovery.gpr(), AssemblyHelpers::payloadFor((VirtualRegister)operand));
                m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operand));
            }

            done.link(&m_jit);

            m_jit.move(AssemblyHelpers::TrustedImmPtr(myScratch + 1), addressGPR);
            m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0);
            m_jit.loadPtr(myScratch, addressGPR);

            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            }
            break;
        }
        default:
            break;
        }
    }

    // 7) Dump all doubles into the stack, or to the scratch storage if the
    //    destination virtual register is poisoned.
    if (haveFPRs) {
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != InFPR)
                continue;
            if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) {
                m_jit.storeDouble(recovery.fpr(), scratchDataBuffer + currentPoisonIndex);
                m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex;
                currentPoisonIndex++;
            } else
                m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)));
        }
    }

    // At this point all GPRs are available for scratch use.

    ASSERT(currentPoisonIndex == numberOfPoisonedVirtualRegisters);

    // 8) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters * 2 <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

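            // Two passes: first load every displaced value (and its tag) into a
            // distinct register, then store them all to their destinations. This way
            // a destination slot that is also some other recovery's source cannot be
            // clobbered before it has been read.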
            unsigned displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    break;
                case Int32DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case CellDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                case BooleanDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
                    m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), GPRInfo::toRegister(displacementIndex++));
                    break;
                default:
                    break;
                }
            }

            displacementIndex = 0;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                case Int32DisplacedInJSStack:
                case CellDisplacedInJSStack:
                case BooleanDisplacedInJSStack:
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not super
            // efficient. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            // 1 -> 2
            // 2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            unsigned scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
                    scratchIndex++;
                    break;
                case Int32DisplacedInJSStack:
                case CellDisplacedInJSStack:
                case BooleanDisplacedInJSStack:
                    m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0);
                    m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
                    break;
                default:
                    break;
                }
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (size_t index = 0; index < operands.size(); ++index) {
                const ValueRecovery& recovery = operands[index];
                switch (recovery.technique()) {
                case DisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    scratchIndex++;
                    break;
                case Int32DisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                case CellDisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                case BooleanDisplacedInJSStack:
                    m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                    m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
                    m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                    break;
                default:
                    break;
                }
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }

    // 9) Dump all poisoned virtual registers.

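    // The poisoned values were parked in the scratch buffer in steps 6 and 7; now
    // that the displaced virtual registers have been shuffled into place, it is
    // safe to copy the poisoned values into their destination stack slots.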
    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = operands.local(virtualRegister);
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case UnboxedBooleanInGPR: {
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                uint32_t tag = JSValue::EmptyValueTag;
                if (recovery.technique() == InGPR)
                    tag = JSValue::CellTag;
                else if (recovery.technique() == UnboxedInt32InGPR)
                    tag = JSValue::Int32Tag;
                else
                    tag = JSValue::BooleanTag;
                m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;
            }

            case InFPR:
            case InPair:
            case UInt32InGPR:
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
                m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)virtualRegister));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }

    // 10) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     often.

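    // If any undefined constants are present, materialize undefined's payload and
    // tag once in regT0/regT1 and reuse those registers for every undefined slot,
    // rather than re-materializing the immediates for each store.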
    if (haveConstants) {
        if (haveUndefined) {
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().payload()), GPRInfo::regT0);
            m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1);
        }

        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined()) {
                m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
            } else {
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index)));
                m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index)));
            }
        }
    }

    // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     (d) Frequent OSR failures, even those that do not result in the code
    //         running in a hot loop, result in recompilation getting triggered.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). But that doesn't take care of (d),
    //     since each speculation failure would reset the execute counter.
    //     So we check here if the number of speculation failures is significantly
    //     larger than the number of successes (we want 90% success rate), and if
    //     there have been a large enough number of failures. If so, we set the
    //     counter to 0; otherwise we set the counter to
    //     counterValueForOptimizeAfterWarmUp().

    handleExitCounts(exit);

    // 12) Reify inlined call frames.

    ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT);
    m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

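    // For each inlined call frame, write out a baseline-style call frame header at
    // the frame's stack offset: CodeBlock, ScopeChain, CallerFrame, ReturnPC,
    // ArgumentCount and Callee, with the ReturnPC pointing into the caller's
    // baseline code just after the call.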
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = m_jit.baselineCodeBlockFor(inlineCallFrame->caller);
        Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlockForCaller);
        unsigned returnBytecodeIndex = inlineCallFrame->caller.bytecodeIndex + OPCODE_LENGTH(op_call);
        BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), returnBytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

        ASSERT(mapping);
        ASSERT(mapping->m_bytecodeIndex == returnBytecodeIndex);

        void* jumpTarget = baselineCodeBlockForCaller->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            m_jit.add32(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame)));
        m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall())
            m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
    }

    // 13) Create arguments if necessary and place them into the appropriate aliased
    //     registers.

    if (haveArguments) {
        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
            NullableHashTraits<InlineCallFrame*> > didCreateArgumentsObject;

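        // The DFG elided the creation of these arguments objects. Re-create each one
        // now (once per inline call frame, tracked by didCreateArgumentsObject),
        // store it into both the frame's arguments register and the corresponding
        // unmodified arguments register, and then box it into every operand that was
        // recovered as ArgumentsThatWereNotCreated.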
        for (size_t index = 0; index < operands.size(); ++index) {
            const ValueRecovery& recovery = operands[index];
            if (recovery.technique() != ArgumentsThatWereNotCreated)
                continue;
            int operand = operands.operandForIndex(index);
            // Find the right inline call frame.
            InlineCallFrame* inlineCallFrame = 0;
            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
                 current;
                 current = current->caller.inlineCallFrame) {
                if (current->stackOffset <= operand) {
                    inlineCallFrame = current;
                    break;
                }
            }

            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
                continue;
            int argumentsRegister = m_jit.argumentsRegisterFor(inlineCallFrame);
            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
                // We know this call frame optimized out an arguments object that
                // the baseline JIT would have created. Do that creation now.
                if (inlineCallFrame) {
                    m_jit.setupArgumentsWithExecState(
                        AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
                    m_jit.move(
                        AssemblyHelpers::TrustedImmPtr(
                            bitwise_cast<void*>(operationCreateInlinedArguments)),
                        GPRInfo::nonArgGPR0);
                } else {
                    m_jit.setupArgumentsExecState();
                    m_jit.move(
                        AssemblyHelpers::TrustedImmPtr(
                            bitwise_cast<void*>(operationCreateArguments)),
                        GPRInfo::nonArgGPR0);
                }
                m_jit.call(GPRInfo::nonArgGPR0);
                m_jit.store32(
                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                    AssemblyHelpers::tagFor(argumentsRegister));
                m_jit.store32(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::payloadFor(argumentsRegister));
                m_jit.store32(
                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                    AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.store32(
                    GPRInfo::returnValueGPR,
                    AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
            }

            m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
            m_jit.store32(
                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
                AssemblyHelpers::tagFor(operand));
            m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
        }
    }

    // 14) Load the result of the last bytecode operation into regT0.

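    // The baseline code we jump into may expect the result of the last executed
    // bytecode to be live in its cached result registers (payload and tag), so
    // reload it from the stack slot we just populated.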
    if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) {
        m_jit.load32(AssemblyHelpers::payloadFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);
        m_jit.load32(AssemblyHelpers::tagFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister2);
    }

    // 15) Adjust the call frame pointer.

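    // If the exit site is inside inlined code, the call frame register still points
    // at the machine (outermost) frame; shift it by the inline frame's stack offset
    // so it points at the reified inline call frame we are returning into.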
    if (exit.m_codeOrigin.inlineCallFrame)
        m_jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    // 16) Jump into the corresponding baseline JIT code.

    CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = m_jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT2 != GPRInfo::cachedResultRegister && GPRInfo::regT2 != GPRInfo::cachedResultRegister2);

    m_jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    m_jit.jump(GPRInfo::regT2);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("   -> %p\n", jumpTarget);
#endif
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT) && USE(JSVALUE32_64)
