/*
 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGOSRExitCompiler.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
39#include "VM.h"
40#include "LinkBuffer.h"
41
namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(&dfg.m_vm, dfg.m_codeBlock)
    , m_graph(dfg)
    , m_currentCodeOriginIndex(0)
{
    if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
        m_disassembler = adoptPtr(new Disassembler(dfg));
}

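// For each OSR exit, bind its failure jumps (or its watchpoint) to a small stub that
// records the exit's index in the VM and ends in a patchable jump; link() later points
// that jump at the OSR exit generation thunk. When profiling, the source labels of
// every exit site are also collected for later reporting.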
void JITCompiler::linkOSRExits()
{
    ASSERT(codeBlock()->numberOfOSRExits() == m_exitCompilationInfo.size());
    if (m_graph.m_compilation) {
        for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
            OSRExit& exit = codeBlock()->osrExit(i);
            Vector<Label> labels;
            if (exit.m_watchpointIndex == std::numeric_limits<unsigned>::max()) {
                OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    labels.append(info.m_failureJumps.jumps()[j].label());
            } else
                labels.append(codeBlock()->watchpoint(exit.m_watchpointIndex).sourceLabel());
            m_exitSiteLabels.append(labels);
        }
    }

    for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
        OSRExit& exit = codeBlock()->osrExit(i);
        JumpList& failureJumps = m_exitCompilationInfo[i].m_failureJumps;
        ASSERT(failureJumps.empty() == (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max()));
        if (exit.m_watchpointIndex == std::numeric_limits<unsigned>::max())
            failureJumps.link(this);
        else
            codeBlock()->watchpoint(exit.m_watchpointIndex).setDestination(label());
        jitAssertHasValidCallFrame();
        store32(TrustedImm32(i), &vm()->osrExitIndex);
        exit.setPatchableCodeOffset(patchableJump());
    }
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, JSStack::ReturnPC);
    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
}

void JITCompiler::compileBody(SpeculativeJIT& speculative)
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

#if DFG_ENABLE(JIT_BREAK_ON_EVERY_FUNCTION)
    // Handy debug tool!
    breakpoint();
#endif

    bool compiledSpeculative = speculative.compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

void JITCompiler::compileExceptionHandlers()
{
    // Iterate over the m_exceptionChecks vector, checking for jumps to link.
    bool didLinkExceptionCheck = false;
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        Jump& exceptionCheck = m_exceptionChecks[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            didLinkExceptionCheck = true;
        }
    }

    // If any exception checks were linked, generate code to look up a handler.
    if (didLinkExceptionCheck) {
        // lookupExceptionHandler is passed two arguments: exec (the CallFrame*) and
        // the index into the CodeBlock's callReturnIndexVector corresponding to the
        // call that threw the exception (this index was placed in nonPreservedNonReturnGPR
        // when the exception check was planted).
        move(GPRInfo::nonPreservedNonReturnGPR, GPRInfo::argumentGPR1);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

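    // For each exception check, record the mapping from the call's return address offset
    // to the bytecode index of its machine (non-inlined) code origin.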
    m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
        while (codeOrigin.inlineCallFrame)
            codeOrigin = codeOrigin.inlineCallFrame->caller;
        unsigned exceptionInfo = codeOrigin.bytecodeIndex;
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
    }

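    // Also record the full (possibly inlined) code origin for each exception check's
    // return offset.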
    Vector<CodeOriginAtCallReturnOffset, 0, UnsafeVectorOverflow>& codeOrigins = m_codeBlock->codeOrigins();
    codeOrigins.resize(m_exceptionChecks.size());

    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        CallExceptionRecord& record = m_exceptionChecks[i];
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        codeOrigins[i].codeOrigin = record.m_codeOrigin;
        codeOrigins[i].callReturnOffset = returnAddressOffset;
    }

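    // Populate a StructureStubInfo for each property access, recording the offsets from
    // the slow-path call's return location to each patchable part of the fast path.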
    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
        info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
        info.callReturnLocation = callReturnLocation;
        info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
        info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
#if USE(JSVALUE64)
        info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
#else
        info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
        info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
#endif
        info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
        info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
        info.patch.dfg.deltaCallToStorageLoad = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_propertyStorageLoad));
        info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
        info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
        m_propertyAccesses[i].m_usedRegisters.copyInfo(info.patch.dfg.usedRegisters);
        info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
    }

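    // Populate a CallLinkInfo for each JS call, and link each slow call to the
    // appropriate link-call or link-construct thunk.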
    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.codeOrigin = m_jsCalls[i].m_codeOrigin;
        linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
        info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
        info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
    }

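    // Point every OSR exit's patchable jump at the OSR exit generation thunk, and fix up
    // the recorded jump offsets (and any watchpoint labels) to their final locations.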
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
        OSRExit& exit = codeBlock()->osrExit(i);
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max())
            codeBlock()->watchpoint(exit.m_watchpointIndex).correctLabels(linkBuffer);
    }

    if (m_graph.m_compilation) {
        ASSERT(m_exitSiteLabels.size() == codeBlock()->numberOfOSRExits());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.m_compilation->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    codeBlock()->saveCompilation(m_graph.m_compilation);
}

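// Compile the code: emit the speculative path, then slow paths, exception handlers and
// OSR exit stubs, and link everything into a JITCode object. Returns false if the
// executable allocation for the link buffer fails.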
bool JITCompiler::compile(JITCode& entry)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // Generate slow path code.
    speculative.runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}

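// As compile(), but for function code: additionally plants a fast stack check in the
// function header, a slow path that calls out when that check fails, and a separate
// entry point that performs an arity check before falling into the normal prologue.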
bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();

    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    CallBeginToken token;
    beginCall(CodeOrigin(0), token);
    Call callStackCheck = call();
    notifyCall(callStackCheck, CodeOrigin(0), token);
    jump(fromStackCheck);

    // The fast entry point into a function does not check that the correct number of
    // arguments has been passed to the call (we only use the fast entry point where we
    // can statically determine that the correct number of arguments has been passed, or
    // have already checked). In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

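    // If enough arguments were passed, jump straight back into the normal prologue path;
    // otherwise call out to the arity check helper, which may move the call frame, and
    // adopt the call frame it returns before rejoining the prologue.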
    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    beginCall(CodeOrigin(0), token);
    Call callArityCheck = call();
    notifyCall(callArityCheck, CodeOrigin(0), token);
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // Generate slow path code.
    speculative.runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    // === Link ===
    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    // FIXME: switch the stack check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callStackCheck, cti_stack_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)
