/*
 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSRExitCompilerCommon.h"

#if ENABLE(DFG_JIT)

#include "Arguments.h"
#include "DFGJITCode.h"
#include "DFGOperations.h"
#include "JIT.h"
#include "JSCJSValueInlines.h"
#include "JSCInlines.h"

namespace JSC { namespace DFG {

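// Record that this OSR exit happened, then decide whether it is time to
// trigger reoptimization of the optimized code block or merely to push back
// the baseline code block's next tier-up attempt.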
void handleExitCounts(CCallHelpers& jit, const OSRExitBase& exit)
{
    jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));

    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()), GPRInfo::regT0);

    AssemblyHelpers::Jump tooFewFails;

    jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2);
    jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()));

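    // We reoptimize in two cases: the baseline code block's execute counter
    // has already crossed zero (the baseline code got hot again while we were
    // exiting), or this code block's total exit count has exceeded its
    // reoptimization threshold. Otherwise we merely delay the next tier-up.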
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), GPRInfo::regT0);
    AssemblyHelpers::Jump reoptimizeNow = jit.branch32(
        AssemblyHelpers::GreaterThanOrEqual,
        AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()),
        AssemblyHelpers::TrustedImm32(0));

    tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()));

    reoptimizeNow.link(&jit);

    // Reoptimize as soon as possible.
#if !NUMBER_OF_ARGUMENT_REGISTERS
    jit.poke(GPRInfo::regT0);
#else
    jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0);
    ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1);
#endif
    jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1);
    jit.call(GPRInfo::regT1);
    AssemblyHelpers::Jump doneAdjusting = jit.jump();

    tooFewFails.link(&jit);

    // Adjust the execution counter so that the baseline code block will try to
    // tier up again only after a long warm-up.
    int32_t activeThreshold =
        jit.baselineCodeBlock()->adjustedCounterValue(
            Options::thresholdForOptimizeAfterLongWarmUp());
    int32_t targetValue = applyMemoryUsageHeuristicsAndConvertToInt(
        activeThreshold, jit.baselineCodeBlock());
    int32_t clippedValue;
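    // Clip the new threshold using the execution counter format appropriate to
    // the tier we are exiting from; the clipping rules differ between the
    // baseline and upper-tier counters.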
    switch (jit.codeBlock()->jitType()) {
    case JITCode::DFGJIT:
        clippedValue = BaselineExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    case JITCode::FTLJIT:
        clippedValue = UpperTierExecutionCounter::clippedThreshold(jit.codeBlock()->globalObject(), targetValue);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        clippedValue = 0; // Make some compilers, and mhahnenberg, happy.
        break;
    }
    jit.store32(AssemblyHelpers::TrustedImm32(-clippedValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
    jit.store32(AssemblyHelpers::TrustedImm32(activeThreshold), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
    jit.store32(AssemblyHelpers::TrustedImm32(formattedTotalExecutionCount(clippedValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));

    doneAdjusting.link(&jit);
}

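// Rebuild the baseline call frames for every inline call frame between the
// exit's code origin and the machine frame, filling in each frame's CodeBlock,
// scope chain, caller frame, return PC, argument count, and callee, so that
// the baseline JIT sees exactly the stack it would have built itself.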
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;
        CallLinkInfo* callLinkInfo =
            baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
        RELEASE_ASSERT(callLinkInfo);

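        // When the reified frame returns, it should return to the machine
        // return address of the corresponding call in the caller's baseline
        // code block.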
        void* jumpTarget = callLinkInfo->callReturnLocation.executableAddress();

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

#if USE(JSVALUE64)
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()->scope()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));

        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::addressFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#else // !USE(JSVALUE64): the 32-bit path
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain)));
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));

        // Leave the captured arguments in regT3.
        if (baselineCodeBlock->usesArguments())
            jit.loadPtr(AssemblyHelpers::payloadFor(VirtualRegister(inlineCallFrame->stackOffset + unmodifiedArgumentsRegister(baselineCodeBlock->argumentsRegister()).offset())), GPRInfo::regT3);
#endif // USE(JSVALUE64)

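        // If this frame has an arguments object (left in regT3 above), repoint
        // its registers pointer at the reified frame.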
        if (baselineCodeBlock->usesArguments()) {
            AssemblyHelpers::Jump noArguments = jit.branchTestPtr(AssemblyHelpers::Zero, GPRInfo::regT3);
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
            jit.storePtr(GPRInfo::regT0, AssemblyHelpers::Address(GPRInfo::regT3, Arguments::offsetOfRegisters()));
            noArguments.link(&jit);
        }
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}

#if ENABLE(GGC)
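// Emit an inline write barrier on |owner|: call out to operationOSRWriteBarrier
// unless the mark byte check shows the object is not marked or is already
// remembered.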
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerNotMarkedOrAlreadyRemembered = jit.checkMarkByte(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    ownerNotMarkedOrAlreadyRemembered.link(&jit);
}
#endif // ENABLE(GGC)

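// Finish the exit: under GGC, write-barrier the executables that own the code
// we are leaving; then restore the call frame and stack pointers for the
// baseline code block and jump to its machine code for the exit's bytecode
// index.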
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
#if ENABLE(GGC)
    // Write barrier the owner executables because we're jumping into a different block.
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()->ownerExecutable()), GPRInfo::nonArgGPR0);
    osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            ScriptExecutable* ownerExecutable = inlineCallFrame->executable.get();
            jit.move(AssemblyHelpers::TrustedImmPtr(ownerExecutable), GPRInfo::nonArgGPR0);
            osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
        }
    }
#endif

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);

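    // Binary-search the decoded code map for the machine code offset that
    // corresponds to the bytecode index we are exiting to.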
    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);

    jit.jitAssertTagsInPlace();

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}

ArgumentsRecoveryGenerator::ArgumentsRecoveryGenerator() { }
ArgumentsRecoveryGenerator::~ArgumentsRecoveryGenerator() { }

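// Recreate the arguments object for |operand| if the DFG optimized it away:
// create it at most once per inline call frame, then store it back into the
// operand's slot in the reified frame.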
void ArgumentsRecoveryGenerator::generateFor(
    int operand, CodeOrigin codeOrigin, CCallHelpers& jit)
{
    // Find the right inline call frame.
    InlineCallFrame* inlineCallFrame = 0;
    for (InlineCallFrame* current = codeOrigin.inlineCallFrame;
         current;
         current = current->caller.inlineCallFrame) {
        if (current->stackOffset >= operand) {
            inlineCallFrame = current;
            break;
        }
    }

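    // If the baseline code block never materializes an arguments object, there
    // is nothing to recover.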
    if (!jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
        return;
    VirtualRegister argumentsRegister = jit.baselineArgumentsRegisterFor(inlineCallFrame);
    if (m_didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
        // We know this call frame optimized out an arguments object that
        // the baseline JIT would have created. Do that creation now.
#if USE(JSVALUE64)
        if (inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT0);
            jit.setupArguments(GPRInfo::regT0);
        } else
            jit.setupArgumentsExecState();
        jit.move(
            AssemblyHelpers::TrustedImmPtr(
                bitwise_cast<void*>(operationCreateArgumentsDuringOSRExit)),
            GPRInfo::nonArgGPR0);
        jit.call(GPRInfo::nonArgGPR0);
        jit.store64(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(argumentsRegister));
        jit.store64(
            GPRInfo::returnValueGPR,
            AssemblyHelpers::addressFor(unmodifiedArgumentsRegister(argumentsRegister)));
        jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
#else // !USE(JSVALUE64): the JSVALUE32_64 path
        if (inlineCallFrame) {
            jit.setupArgumentsWithExecState(
                AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
            jit.move(
                AssemblyHelpers::TrustedImmPtr(
                    bitwise_cast<void*>(operationCreateInlinedArgumentsDuringOSRExit)),
                GPRInfo::nonArgGPR0);
        } else {
            jit.setupArgumentsExecState();
            jit.move(
                AssemblyHelpers::TrustedImmPtr(
                    bitwise_cast<void*>(operationCreateArgumentsDuringOSRExit)),
                GPRInfo::nonArgGPR0);
        }
        jit.call(GPRInfo::nonArgGPR0);
        jit.store32(
            AssemblyHelpers::TrustedImm32(JSValue::CellTag),
            AssemblyHelpers::tagFor(argumentsRegister));
        jit.store32(
            GPRInfo::returnValueGPR,
            AssemblyHelpers::payloadFor(argumentsRegister));
        jit.store32(
            AssemblyHelpers::TrustedImm32(JSValue::CellTag),
            AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
        jit.store32(
            GPRInfo::returnValueGPR,
            AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
        jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
#endif // USE(JSVALUE64)
    }

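    // Store the (possibly just-created) arguments object into the operand's slot.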
#if USE(JSVALUE64)
    jit.load64(AssemblyHelpers::addressFor(argumentsRegister), GPRInfo::regT0);
    jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(operand));
#else // !USE(JSVALUE64): the JSVALUE32_64 path
    jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
    jit.store32(
        AssemblyHelpers::TrustedImm32(JSValue::CellTag),
        AssemblyHelpers::tagFor(operand));
    jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
#endif // USE(JSVALUE64)
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)