/*
 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "FTLCompile.h"

#if ENABLE(FTL_JIT)

31#include "CodeBlockWithJITType.h"
32#include "CCallHelpers.h"
33#include "DFGCommon.h"
34#include "DFGGraphSafepoint.h"
35#include "DataView.h"
36#include "Disassembler.h"
37#include "FTLExitThunkGenerator.h"
38#include "FTLInlineCacheSize.h"
39#include "FTLJITCode.h"
40#include "FTLThunks.h"
41#include "FTLUnwindInfo.h"
42#include "JITStubs.h"
43#include "LLVMAPI.h"
44#include "LinkBuffer.h"
45#include "RepatchBuffer.h"
46
47namespace JSC { namespace FTL {
48
49using namespace DFG;
50
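// Memory manager callbacks handed to LLVM's MCJIT. Code sections are carved out of the VM's
// executable allocator and attached to the JITCode object so that they live as long as it does.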
static uint8_t* mmAllocateCodeSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned, const char* sectionName)
{
    State& state = *static_cast<State*>(opaqueState);

    RELEASE_ASSERT(alignment <= jitAllocationGranule);

    RefPtr<ExecutableMemoryHandle> result =
        state.graph.m_vm.executableAllocator.allocate(
            state.graph.m_vm, size, state.graph.m_codeBlock, JITCompilationMustSucceed);

    // LLVM used to put __compact_unwind in a code section. We keep this here defensively,
    // for clients that use older LLVMs.
    if (!strcmp(sectionName, "__compact_unwind")) {
        state.compactUnwind = result->start();
        state.compactUnwindSize = result->sizeInBytes();
    }

    state.jitCode->addHandle(result);
    state.codeSectionNames.append(sectionName);

    return static_cast<uint8_t*>(result->start());
}

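// Data sections are allocated as plain, non-executable DataSection objects. Two sections are
// special: the GOT goes into executable memory so it is reachable from code, and
// __llvm_stackmaps is set aside so it can be parsed after compilation.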
static uint8_t* mmAllocateDataSection(
    void* opaqueState, uintptr_t size, unsigned alignment, unsigned sectionID,
    const char* sectionName, LLVMBool isReadOnly)
{
    UNUSED_PARAM(sectionID);
    UNUSED_PARAM(isReadOnly);

    // Allocate the GOT in the code section to make it reachable for all code.
    if (!strcmp(sectionName, "__got"))
        return mmAllocateCodeSection(opaqueState, size, alignment, sectionID, sectionName);

    State& state = *static_cast<State*>(opaqueState);

    RefPtr<DataSection> section = adoptRef(new DataSection(size, alignment));

    if (!strcmp(sectionName, "__llvm_stackmaps"))
        state.stackmapsSection = section;
    else {
        state.jitCode->addDataSection(section);
        state.dataSectionNames.append(sectionName);
        if (!strcmp(sectionName, "__compact_unwind")) {
            state.compactUnwind = section->base();
            state.compactUnwindSize = size;
        }
    }

    return bitwise_cast<uint8_t*>(section->base());
}

static LLVMBool mmApplyPermissions(void*, char**)
{
    return false;
}

static void mmDestroy(void*)
{
}

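// Dumps a data section as 64-bit words, one per line, for the disassembly log.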
static void dumpDataSection(DataSection* section, const char* prefix)
{
    for (unsigned j = 0; j < section->size() / sizeof(int64_t); ++j) {
        char buf[32];
        int64_t* wordPointer = static_cast<int64_t*>(section->base()) + j;
        snprintf(buf, sizeof(buf), "0x%lx", static_cast<unsigned long>(bitwise_cast<uintptr_t>(wordPointer)));
        dataLogF("%s%16s: 0x%016llx\n", prefix, buf, static_cast<long long>(*wordPointer));
    }
}

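// Emits the fast path of an inline cache into the patchpoint slot that LLVM reserved for it,
// pads the rest of the slot with nops, and wires the fast path up to the slow path that was
// emitted into the side-code link buffer.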
template<typename DescriptorType>
void generateICFastPath(
    State& state, CodeBlock* codeBlock, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, DescriptorType& ic, size_t sizeOfIC)
{
    VM& vm = state.graph.m_vm;

    StackMaps::RecordMap::iterator iter = recordMap.find(ic.stackmapID());
    if (iter == recordMap.end()) {
        // It was optimized out.
        return;
    }

    Vector<StackMaps::Record>& records = iter->value;

    RELEASE_ASSERT(records.size() == ic.m_generators.size());

    for (unsigned i = records.size(); i--;) {
        StackMaps::Record& record = records[i];
        auto generator = ic.m_generators[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        generator.generateFastPath(fastPathJIT);

        char* startOfIC =
            bitwise_cast<char*>(generatedFunction) + record.instructionOffset;

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfIC);
        // Note: we could handle the !isValid() case. We just don't appear to have a
        // reason to do so, yet.
        RELEASE_ASSERT(linkBuffer.isValid());

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfIC - linkBuffer.size());

        state.finalizer->sideCodeLinkBuffer->link(
            ic.m_slowPathDone[i], CodeLocationLabel(startOfIC + sizeOfIC));

        linkBuffer.link(
            generator.slowPathJump(),
            state.finalizer->sideCodeLinkBuffer->locationOf(generator.slowPathBegin()));

        generator.finalize(linkBuffer, *state.finalizer->sideCodeLinkBuffer);
    }
}

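// The registers that the stackmap record reports as live at the patchpoint, combined with the
// callee-saves. Can be pessimized to all registers via an Option, which helps when debugging ICs.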
static RegisterSet usedRegistersFor(const StackMaps::Record& record)
{
    if (Options::assumeAllRegsInFTLICAreLive())
        return RegisterSet::allRegisters();
    return RegisterSet(record.usedRegisterSet(), RegisterSet::calleeSaveRegisters());
}

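// Post-processes the code that LLVM generated: rebases stack offsets using the captured
// variables stackmap, builds the exception and OSR exit thunks, fills in inline caches and
// JS call sites at their patchpoints, and patches in jumps to the exit and exception paths.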
static void fixFunctionBasedOnStackMaps(
    State& state, CodeBlock* codeBlock, JITCode* jitCode, GeneratedFunction generatedFunction,
    StackMaps::RecordMap& recordMap, bool didSeeUnwindInfo)
{
    Graph& graph = state.graph;
    VM& vm = graph.m_vm;
    StackMaps stackmaps = jitCode->stackmaps;

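    // The captured-variables stackmap tells us where LLVM ended up putting the locals: its one
    // location must be the call frame register plus a byte offset. From that we derive
    // localsOffset, the amount by which every stack-relative virtual register must be shifted.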
    StackMaps::RecordMap::iterator iter = recordMap.find(state.capturedStackmapID);
    RELEASE_ASSERT(iter != recordMap.end());
    RELEASE_ASSERT(iter->value.size() == 1);
    RELEASE_ASSERT(iter->value[0].locations.size() == 1);
    Location capturedLocation =
        Location::forStackmaps(&jitCode->stackmaps, iter->value[0].locations[0]);
    RELEASE_ASSERT(capturedLocation.kind() == Location::Register);
    RELEASE_ASSERT(capturedLocation.gpr() == GPRInfo::callFrameRegister);
    RELEASE_ASSERT(!(capturedLocation.addend() % sizeof(Register)));
    int32_t localsOffset = capturedLocation.addend() / sizeof(Register) + graph.m_nextMachineLocal;

    for (unsigned i = graph.m_inlineVariableData.size(); i--;) {
        InlineCallFrame* inlineCallFrame = graph.m_inlineVariableData[i].inlineCallFrame;

        if (inlineCallFrame->argumentsRegister.isValid()) {
            inlineCallFrame->argumentsRegister = VirtualRegister(
                inlineCallFrame->argumentsRegister.offset() + localsOffset);
        }

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            inlineCallFrame->arguments[argument] =
                inlineCallFrame->arguments[argument].withLocalsOffset(localsOffset);
        }

        if (inlineCallFrame->isClosureCall) {
            inlineCallFrame->calleeRecovery =
                inlineCallFrame->calleeRecovery.withLocalsOffset(localsOffset);
        }
    }

    if (codeBlock->usesArguments()) {
        codeBlock->setArgumentsRegister(
            VirtualRegister(codeBlock->argumentsRegister().offset() + localsOffset));
    }

    MacroAssembler::Label stackOverflowException;

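    // Build the common exception-handling stub: restore the tag registers, call
    // lookupExceptionHandler(vm, callFrame), and jump to the handler it selects. The
    // stackOverflowException entry point first reloads the caller's frame and then falls
    // into the same path.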
    {
        CCallHelpers checkJIT(&vm, codeBlock);

        // At this point it's perfectly fair to just blow away all state and restore the
        // JS JIT view of the universe.
        checkJIT.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

        MacroAssembler::Label exceptionContinueArg1Set = checkJIT.label();
        checkJIT.move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
        checkJIT.move(MacroAssembler::TrustedImm64(TagMask), GPRInfo::tagMaskRegister);

        checkJIT.move(MacroAssembler::TrustedImmPtr(&vm), GPRInfo::argumentGPR0);
        MacroAssembler::Call call = checkJIT.call();
        checkJIT.jumpToExceptionHandler();

        stackOverflowException = checkJIT.label();
        checkJIT.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR1);
        checkJIT.jump(exceptionContinueArg1Set);

        OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(
            vm, checkJIT, codeBlock, JITCompilationMustSucceed));
        linkBuffer->link(call, FunctionPtr(lookupExceptionHandler));

        state.finalizer->handleExceptionsLinkBuffer = linkBuffer.release();
    }

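    // Emit the OSR exit thunks. We remember each thunk's address so that the matching
    // patchpoint can later be replaced with a jump to it, and we rebase any exit values that
    // refer to stack locals by the localsOffset computed above.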
    ExitThunkGenerator exitThunkGenerator(state);
    exitThunkGenerator.emitThunks();
    if (exitThunkGenerator.didThings()) {
        RELEASE_ASSERT(state.finalizer->osrExit.size());
        RELEASE_ASSERT(didSeeUnwindInfo);

        OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(
            vm, exitThunkGenerator, codeBlock, JITCompilationMustSucceed));

        RELEASE_ASSERT(state.finalizer->osrExit.size() == state.jitCode->osrExit.size());

        for (unsigned i = 0; i < state.jitCode->osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = state.finalizer->osrExit[i];
            OSRExit& exit = jitCode->osrExit[i];

            if (verboseCompilationEnabled())
                dataLog("Handling OSR stackmap #", exit.m_stackmapID, " for ", exit.m_codeOrigin, "\n");

            iter = recordMap.find(exit.m_stackmapID);
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            info.m_thunkAddress = linkBuffer->locationOf(info.m_thunkLabel);
            exit.m_patchableCodeOffset = linkBuffer->offsetOf(info.m_thunkJump);

            for (unsigned j = exit.m_values.size(); j--;) {
                ExitValue value = exit.m_values[j];
                if (!value.isInJSStackSomehow())
                    continue;
                if (!value.virtualRegister().isLocal())
                    continue;
                exit.m_values[j] = value.withVirtualRegister(
                    VirtualRegister(value.virtualRegister().offset() + localsOffset));
            }

            if (verboseCompilationEnabled()) {
                DumpContext context;
                dataLog("    Exit values: ", inContext(exit.m_values, &context), "\n");
            }
        }

        state.finalizer->exitThunksLinkBuffer = linkBuffer.release();
    }

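    // Emit the slow paths for the GetById/PutById inline caches into a side-code buffer, one
    // per patchpoint record, then stamp the corresponding fast paths over the patchpoints.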
    if (!state.getByIds.isEmpty() || !state.putByIds.isEmpty()) {
        CCallHelpers slowPathJIT(&vm, codeBlock);

        CCallHelpers::JumpList exceptionTarget;

        for (unsigned i = state.getByIds.size(); i--;) {
            GetByIdDescriptor& getById = state.getByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling GetById stackmap #", getById.stackmapID(), "\n");

            iter = recordMap.find(getById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg result = record.locations[0].directGPR();
                GPRReg base = record.locations[1].directGPR();

                JITGetByIdGenerator gen(
                    codeBlock, getById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(result), NeedToSpill);

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, getById.codeOrigin(), &exceptionTarget,
                    operationGetByIdOptimize, result, gen.stubInfo(), base, getById.uid());

                gen.reportSlowPathCall(begin, call);

                getById.m_slowPathDone.append(slowPathJIT.jump());
                getById.m_generators.append(gen);
            }
        }

        for (unsigned i = state.putByIds.size(); i--;) {
            PutByIdDescriptor& putById = state.putByIds[i];

            if (verboseCompilationEnabled())
                dataLog("Handling PutById stackmap #", putById.stackmapID(), "\n");

            iter = recordMap.find(putById.stackmapID());
            if (iter == recordMap.end()) {
                // It was optimized out.
                continue;
            }

            for (unsigned i = 0; i < iter->value.size(); ++i) {
                StackMaps::Record& record = iter->value[i];

                RegisterSet usedRegisters = usedRegistersFor(record);

                GPRReg base = record.locations[0].directGPR();
                GPRReg value = record.locations[1].directGPR();

                JITPutByIdGenerator gen(
                    codeBlock, putById.codeOrigin(), usedRegisters, JSValueRegs(base),
                    JSValueRegs(value), GPRInfo::patchpointScratchRegister, NeedToSpill,
                    putById.ecmaMode(), putById.putKind());

                MacroAssembler::Label begin = slowPathJIT.label();

                MacroAssembler::Call call = callOperation(
                    state, usedRegisters, slowPathJIT, putById.codeOrigin(), &exceptionTarget,
                    gen.slowPathFunction(), gen.stubInfo(), value, base, putById.uid());

                gen.reportSlowPathCall(begin, call);

                putById.m_slowPathDone.append(slowPathJIT.jump());
                putById.m_generators.append(gen);
            }
        }

        exceptionTarget.link(&slowPathJIT);
        MacroAssembler::Jump exceptionJump = slowPathJIT.jump();

        state.finalizer->sideCodeLinkBuffer = adoptPtr(
            new LinkBuffer(vm, slowPathJIT, codeBlock, JITCompilationMustSucceed));
        state.finalizer->sideCodeLinkBuffer->link(
            exceptionJump, state.finalizer->handleExceptionsLinkBuffer->entrypoint());

        for (unsigned i = state.getByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.getByIds[i],
                sizeOfGetById());
        }
        for (unsigned i = state.putByIds.size(); i--;) {
            generateICFastPath(
                state, codeBlock, generatedFunction, recordMap, state.putByIds[i],
                sizeOfPutById());
        }
    }

    // Handling JS calls is weird: we need to ensure that we sort them by the PC in LLVM
    // generated code. That implies first pruning the ones that LLVM didn't generate.
    Vector<JSCall> oldCalls = state.jsCalls;
    state.jsCalls.resize(0);
    for (unsigned i = 0; i < oldCalls.size(); ++i) {
        JSCall& call = oldCalls[i];

        StackMaps::RecordMap::iterator iter = recordMap.find(call.stackmapID());
        if (iter == recordMap.end())
            continue;

        for (unsigned j = 0; j < iter->value.size(); ++j) {
            JSCall copy = call;
            copy.m_instructionOffset = iter->value[j].instructionOffset;
            state.jsCalls.append(copy);
        }
    }

    std::sort(state.jsCalls.begin(), state.jsCalls.end());

    for (unsigned i = state.jsCalls.size(); i--;) {
        JSCall& call = state.jsCalls[i];

        CCallHelpers fastPathJIT(&vm, codeBlock);
        call.emit(fastPathJIT);

        char* startOfIC = bitwise_cast<char*>(generatedFunction) + call.m_instructionOffset;

        LinkBuffer linkBuffer(vm, fastPathJIT, startOfIC, sizeOfCall());
        if (!linkBuffer.isValid()) {
            dataLog("Failed to insert inline cache for call because we thought the size would be ", sizeOfCall(), " but it ended up being ", fastPathJIT.m_assembler.codeSize(), " prior to compaction.\n");
            RELEASE_ASSERT_NOT_REACHED();
        }

        MacroAssembler::AssemblerType_T::fillNops(
            startOfIC + linkBuffer.size(), sizeOfCall() - linkBuffer.size());

        call.link(vm, linkBuffer);
    }

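    // From here on we patch the generated code in place: the patchpoints reserved for exception
    // checks and OSR exits are replaced with jumps to the thunks built above.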
    RepatchBuffer repatchBuffer(codeBlock);

    iter = recordMap.find(state.handleStackOverflowExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            RELEASE_ASSERT(stackOverflowException.isSet());

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->locationOf(stackOverflowException));
        }
    }

    iter = recordMap.find(state.handleExceptionStackmapID);
    // It's sort of remotely possible that we won't have an in-band exception handling
    // path, for some kinds of functions.
    if (iter != recordMap.end()) {
        for (unsigned i = iter->value.size(); i--;) {
            StackMaps::Record& record = iter->value[i];

            CodeLocationLabel source = CodeLocationLabel(
                bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

            repatchBuffer.replaceWithJump(source, state.finalizer->handleExceptionsLinkBuffer->entrypoint());
        }
    }

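    // Point every OSR exit patchpoint at its thunk. Invalidation points are not patched now;
    // they are recorded as jump replacements to be installed if this code is ever invalidated.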
    for (unsigned exitIndex = 0; exitIndex < jitCode->osrExit.size(); ++exitIndex) {
        OSRExitCompilationInfo& info = state.finalizer->osrExit[exitIndex];
        OSRExit& exit = jitCode->osrExit[exitIndex];
        iter = recordMap.find(exit.m_stackmapID);

        Vector<const void*> codeAddresses;

        if (iter != recordMap.end()) {
            for (unsigned i = iter->value.size(); i--;) {
                StackMaps::Record& record = iter->value[i];

                CodeLocationLabel source = CodeLocationLabel(
                    bitwise_cast<char*>(generatedFunction) + record.instructionOffset);

                codeAddresses.append(bitwise_cast<char*>(generatedFunction) + record.instructionOffset + MacroAssembler::maxJumpReplacementSize());

                if (info.m_isInvalidationPoint)
                    jitCode->common.jumpReplacements.append(JumpReplacement(source, info.m_thunkAddress));
                else
                    repatchBuffer.replaceWithJump(source, info.m_thunkAddress);
            }
        }

        if (graph.compilation())
            graph.compilation()->addOSRExitSite(codeAddresses);
    }
}

void compile(State& state, Safepoint::Result& safepointResult)
{
    char* error = 0;

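    // Run the LLVM backend inside a graph safepoint, so that the rest of the VM can interact
    // with (and possibly cancel) this compilation plan while LLVM runs. Cancellation is
    // checked once the safepoint is released below.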
    {
        GraphSafepoint safepoint(state.graph, safepointResult);

        LLVMMCJITCompilerOptions options;
        llvm->InitializeMCJITCompilerOptions(&options, sizeof(options));
        options.OptLevel = Options::llvmBackendOptimizationLevel();
        options.NoFramePointerElim = true;
        if (Options::useLLVMSmallCodeModel())
            options.CodeModel = LLVMCodeModelSmall;
        options.EnableFastISel = Options::enableLLVMFastISel();
        options.MCJMM = llvm->CreateSimpleMCJITMemoryManager(
            &state, mmAllocateCodeSection, mmAllocateDataSection, mmApplyPermissions, mmDestroy);

        LLVMExecutionEngineRef engine;

        if (isARM64())
            llvm->SetTarget(state.module, "arm64-apple-ios");

        if (llvm->CreateMCJITCompilerForModule(&engine, state.module, &options, sizeof(options), &error)) {
            dataLog("FATAL: Could not create LLVM execution engine: ", error, "\n");
            CRASH();
        }

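        // Optimize the module before code generation: either a fixed list of passes (the
        // "simple" pipeline) or the standard pipeline assembled by PassManagerBuilder for the
        // configured optimization and size levels.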
        LLVMPassManagerRef functionPasses = 0;
        LLVMPassManagerRef modulePasses;

        if (Options::llvmSimpleOpt()) {
            modulePasses = llvm->CreatePassManager();
            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);
            llvm->AddPromoteMemoryToRegisterPass(modulePasses);
            llvm->AddConstantPropagationPass(modulePasses);
            llvm->AddInstructionCombiningPass(modulePasses);
            llvm->AddTypeBasedAliasAnalysisPass(modulePasses);
            llvm->AddBasicAliasAnalysisPass(modulePasses);
            llvm->AddGVNPass(modulePasses);
            llvm->AddCFGSimplificationPass(modulePasses);
            llvm->AddDeadStoreEliminationPass(modulePasses);
            llvm->RunPassManager(modulePasses, state.module);
        } else {
            LLVMPassManagerBuilderRef passBuilder = llvm->PassManagerBuilderCreate();
            llvm->PassManagerBuilderSetOptLevel(passBuilder, Options::llvmOptimizationLevel());
            llvm->PassManagerBuilderSetSizeLevel(passBuilder, Options::llvmSizeLevel());

            functionPasses = llvm->CreateFunctionPassManagerForModule(state.module);
            modulePasses = llvm->CreatePassManager();

            llvm->AddTargetData(llvm->GetExecutionEngineTargetData(engine), modulePasses);

            llvm->PassManagerBuilderPopulateFunctionPassManager(passBuilder, functionPasses);
            llvm->PassManagerBuilderPopulateModulePassManager(passBuilder, modulePasses);

            llvm->PassManagerBuilderDispose(passBuilder);

            llvm->InitializeFunctionPassManager(functionPasses);
            for (LValue function = llvm->GetFirstFunction(state.module); function; function = llvm->GetNextFunction(function))
                llvm->RunFunctionPassManager(functionPasses, function);
            llvm->FinalizeFunctionPassManager(functionPasses);

            llvm->RunPassManager(modulePasses, state.module);
        }

        if (shouldShowDisassembly() || verboseCompilationEnabled())
            state.dumpState("after optimization");

        // FIXME: Need to add support for the case where JIT memory allocation failed.
        // https://bugs.webkit.org/show_bug.cgi?id=113620
        state.generatedFunction = reinterpret_cast<GeneratedFunction>(llvm->GetPointerToGlobal(engine, state.function));
        if (functionPasses)
            llvm->DisposePassManager(functionPasses);
        llvm->DisposePassManager(modulePasses);
        llvm->DisposeExecutionEngine(engine);
    }
    if (safepointResult.didGetCancelled())
        return;
    RELEASE_ASSERT(!state.graph.m_vm.heap.isCollecting());

    if (shouldShowDisassembly()) {
        for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
            ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
            dataLog(
                "Generated LLVM code for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.codeSectionNames[i], ":\n");
            disassemble(
                MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                "    ", WTF::dataFile(), LLVMSubset);
        }

        for (unsigned i = 0; i < state.jitCode->dataSections().size(); ++i) {
            DataSection* section = state.jitCode->dataSections()[i].get();
            dataLog(
                "Generated LLVM data section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                " #", i, ", ", state.dataSectionNames[i], ":\n");
            dumpDataSection(section, "    ");
        }
    }

    bool didSeeUnwindInfo = state.jitCode->unwindInfo.parse(
        state.compactUnwind, state.compactUnwindSize, state.generatedFunction);
    if (shouldShowDisassembly()) {
        dataLog("Unwind info for ", CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
        if (didSeeUnwindInfo)
            dataLog("    ", state.jitCode->unwindInfo, "\n");
        else
            dataLog("    <no unwind info>\n");
    }

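    // If LLVM emitted an __llvm_stackmaps section, parse it and use it to fix up the generated
    // code: the patchpoints it describes become inline caches, JS calls, and jumps to the
    // exception and OSR exit paths.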
    if (state.stackmapsSection && state.stackmapsSection->size()) {
        if (shouldShowDisassembly()) {
            dataLog(
                "Generated LLVM stackmaps section for ",
                CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT), ":\n");
            dataLog("    Raw data:\n");
            dumpDataSection(state.stackmapsSection.get(), "    ");
        }

        RefPtr<DataView> stackmapsData = DataView::create(
            ArrayBuffer::create(state.stackmapsSection->base(), state.stackmapsSection->size()));
        state.jitCode->stackmaps.parse(stackmapsData.get());

        if (shouldShowDisassembly()) {
            dataLog("    Structured data:\n");
            state.jitCode->stackmaps.dumpMultiline(WTF::dataFile(), "        ");
        }

        StackMaps::RecordMap recordMap = state.jitCode->stackmaps.computeRecordMap();
        fixFunctionBasedOnStackMaps(
            state, state.graph.m_codeBlock, state.jitCode.get(), state.generatedFunction,
            recordMap, didSeeUnwindInfo);

        if (shouldShowDisassembly()) {
            for (unsigned i = 0; i < state.jitCode->handles().size(); ++i) {
                if (state.codeSectionNames[i] != "__text")
                    continue;

                ExecutableMemoryHandle* handle = state.jitCode->handles()[i].get();
                dataLog(
                    "Generated LLVM code after stackmap-based fix-up for ",
                    CodeBlockWithJITType(state.graph.m_codeBlock, JITCode::FTLJIT),
                    " in ", state.graph.m_plan.mode, " #", i, ", ",
                    state.codeSectionNames[i], ":\n");
                disassemble(
                    MacroAssemblerCodePtr(handle->start()), handle->sizeInBytes(),
                    "    ", WTF::dataFile(), LLVMSubset);
            }
        }
    }

    state.module = 0; // We no longer own the module.
}

} } // namespace JSC::FTL

#endif // ENABLE(FTL_JIT)