1/*
2 * Copyright (C) 2008, 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *
9 * 1.  Redistributions of source code must retain the above copyright
10 *     notice, this list of conditions and the following disclaimer.
11 * 2.  Redistributions in binary form must reproduce the above copyright
12 *     notice, this list of conditions and the following disclaimer in the
13 *     documentation and/or other materials provided with the distribution.
14 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
15 *     its contributors may be used to endorse or promote products derived
16 *     from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#include "config.h"
31#include "CodeBlock.h"
32
33#include "BytecodeGenerator.h"
34#include "BytecodeUseDef.h"
35#include "CallLinkStatus.h"
36#include "DFGCapabilities.h"
37#include "DFGCommon.h"
38#include "DFGDriver.h"
39#include "DFGJITCode.h"
40#include "DFGWorklist.h"
41#include "Debugger.h"
42#include "Interpreter.h"
43#include "JIT.h"
44#include "JITStubs.h"
45#include "JSActivation.h"
46#include "JSCJSValue.h"
47#include "JSFunction.h"
48#include "JSNameScope.h"
49#include "LLIntEntrypoint.h"
50#include "LowLevelInterpreter.h"
51#include "JSCInlines.h"
52#include "PolymorphicGetByIdList.h"
53#include "PolymorphicPutByIdList.h"
54#include "ProfilerDatabase.h"
55#include "ReduceWhitespace.h"
56#include "Repatch.h"
57#include "RepatchBuffer.h"
58#include "SlotVisitorInlines.h"
59#include "UnlinkedInstructionStream.h"
60#include <wtf/BagToHashMap.h>
61#include <wtf/CommaPrinter.h>
62#include <wtf/StringExtras.h>
63#include <wtf/StringPrintStream.h>
64
65#if ENABLE(DFG_JIT)
66#include "DFGOperations.h"
67#endif
68
69#if ENABLE(FTL_JIT)
70#include "FTLJITCode.h"
71#endif
72
73namespace JSC {
74
75CString CodeBlock::inferredName() const
76{
77    switch (codeType()) {
78    case GlobalCode:
79        return "<global>";
80    case EvalCode:
81        return "<eval>";
82    case FunctionCode:
83        return jsCast<FunctionExecutable*>(ownerExecutable())->inferredName().utf8();
84    default:
85        CRASH();
86        return CString("", 0);
87    }
88}
89
90bool CodeBlock::hasHash() const
91{
92    return !!m_hash;
93}
94
95bool CodeBlock::isSafeToComputeHash() const
96{
97    return !isCompilationThread();
98}
99
100CodeBlockHash CodeBlock::hash() const
101{
102    if (!m_hash) {
103        RELEASE_ASSERT(isSafeToComputeHash());
104        m_hash = CodeBlockHash(ownerExecutable()->source(), specializationKind());
105    }
106    return m_hash;
107}
108
CString CodeBlock::sourceCodeForTools() const
{
    // Returns a UTF-8 rendering of this block's source for tooling dumps.
    // Global and eval code: the whole source range as-is.
    if (codeType() != FunctionCode)
        return ownerExecutable()->source().toUTF8();

    SourceProvider* provider = source();
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(ownerExecutable());
    UnlinkedFunctionExecutable* unlinked = executable->unlinkedExecutable();
    // The unlinked executable stores offsets relative to its own start; the
    // linked executable knows where that start lies within the provider's
    // source. delta rebases unlinked offsets into provider coordinates.
    unsigned unlinkedStartOffset = unlinked->startOffset();
    unsigned linkedStartOffset = executable->source().startOffset();
    int delta = linkedStartOffset - unlinkedStartOffset;
    // Start at the function's name (so the dump reads "function name(...)")
    // and run through the end of the function's source extent.
    unsigned rangeStart = delta + unlinked->unlinkedFunctionNameStart();
    unsigned rangeEnd = delta + unlinked->startOffset() + unlinked->sourceLength();
    return toCString(
        "function ",
        provider->source().impl()->utf8ForRange(rangeStart, rangeEnd - rangeStart));
}
126
127CString CodeBlock::sourceCodeOnOneLine() const
128{
129    return reduceWhitespace(sourceCodeForTools());
130}
131
132CString CodeBlock::hashAsStringIfPossible() const
133{
134    if (hasHash() || isSafeToComputeHash())
135        return toCString(hash());
136    return "<no-hash>";
137}
138
void CodeBlock::dumpAssumingJITType(PrintStream& out, JITCode::JITType jitType) const
{
    // One-line summary header: "name#hash:[this->alternative->owner, tier,
    // codeType, instructionCount, flags]". The tier is passed in so callers
    // can dump this block as if it were at another JIT tier.
    out.print(inferredName(), "#", hashAsStringIfPossible());
    out.print(":[", RawPointer(this), "->");
    if (!!m_alternative)
        out.print(RawPointer(m_alternative.get()), "->");
    out.print(RawPointer(ownerExecutable()), ", ", jitType, codeType());

    if (codeType() == FunctionCode)
        out.print(specializationKind());
    out.print(", ", instructionCount());
    // Flag annotations; SABI = should always be inlined.
    if (this->jitType() == JITCode::BaselineJIT && m_shouldAlwaysBeInlined)
        out.print(" (SABI)");
    if (ownerExecutable()->neverInline())
        out.print(" (NeverInline)");
    if (ownerExecutable()->isStrictMode())
        out.print(" (StrictMode)");
    if (this->jitType() == JITCode::BaselineJIT && m_didFailFTLCompilation)
        out.print(" (FTLFail)");
    if (this->jitType() == JITCode::BaselineJIT && m_hasBeenCompiledWithFTL)
        out.print(" (HadFTLReplacement)");
    out.print("]");
}
162
163void CodeBlock::dump(PrintStream& out) const
164{
165    dumpAssumingJITType(out, jitType());
166}
167
168static CString constantName(int k, JSValue value)
169{
170    return toCString(value, "(@k", k - FirstConstantRegisterIndex, ")");
171}
172
173static CString idName(int id0, const Identifier& ident)
174{
175    return toCString(ident.impl(), "(@id", id0, ")");
176}
177
178CString CodeBlock::registerName(int r) const
179{
180    if (r == missingThisObjectMarker())
181        return "<null>";
182
183    if (isConstantRegisterIndex(r))
184        return constantName(r, getConstant(r));
185
186    if (operandIsArgument(r)) {
187        if (!VirtualRegister(r).toArgument())
188            return "this";
189        return toCString("arg", VirtualRegister(r).toArgument());
190    }
191
192    return toCString("loc", VirtualRegister(r).toLocal());
193}
194
195static CString regexpToSourceString(RegExp* regExp)
196{
197    char postfix[5] = { '/', 0, 0, 0, 0 };
198    int index = 1;
199    if (regExp->global())
200        postfix[index++] = 'g';
201    if (regExp->ignoreCase())
202        postfix[index++] = 'i';
203    if (regExp->multiline())
204        postfix[index] = 'm';
205
206    return toCString("/", regExp->pattern().impl(), postfix);
207}
208
209static CString regexpName(int re, RegExp* regexp)
210{
211    return toCString(regexpToSourceString(regexp), "(@re", re, ")");
212}
213
214NEVER_INLINE static const char* debugHookName(int debugHookID)
215{
216    switch (static_cast<DebugHookID>(debugHookID)) {
217        case DidEnterCallFrame:
218            return "didEnterCallFrame";
219        case WillLeaveCallFrame:
220            return "willLeaveCallFrame";
221        case WillExecuteStatement:
222            return "willExecuteStatement";
223        case WillExecuteProgram:
224            return "willExecuteProgram";
225        case DidExecuteProgram:
226            return "didExecuteProgram";
227        case DidReachBreakpoint:
228            return "didReachBreakpoint";
229    }
230
231    RELEASE_ASSERT_NOT_REACHED();
232    return "";
233}
234
235void CodeBlock::printUnaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
236{
237    int r0 = (++it)->u.operand;
238    int r1 = (++it)->u.operand;
239
240    printLocationAndOp(out, exec, location, it, op);
241    out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
242}
243
244void CodeBlock::printBinaryOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
245{
246    int r0 = (++it)->u.operand;
247    int r1 = (++it)->u.operand;
248    int r2 = (++it)->u.operand;
249    printLocationAndOp(out, exec, location, it, op);
250    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
251}
252
253void CodeBlock::printConditionalJump(PrintStream& out, ExecState* exec, const Instruction*, const Instruction*& it, int location, const char* op)
254{
255    int r0 = (++it)->u.operand;
256    int offset = (++it)->u.operand;
257    printLocationAndOp(out, exec, location, it, op);
258    out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset);
259}
260
void CodeBlock::printGetByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it)
{
    // Decode and print one of the get_by_id family of opcodes, with the
    // operand shape: op dst, base, property.
    const char* op;
    switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
    case op_get_by_id:
        op = "get_by_id";
        break;
    case op_get_by_id_out_of_line:
        op = "get_by_id_out_of_line";
        break;
    case op_get_array_length:
        op = "array_length";
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        op = 0;
    }
    int r0 = (++it)->u.operand;
    int r1 = (++it)->u.operand;
    int id0 = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
    it += 4; // Increment up to the value profiler.
}
285
286static void dumpStructure(PrintStream& out, const char* name, ExecState* exec, Structure* structure, const Identifier& ident)
287{
288    if (!structure)
289        return;
290
291    out.printf("%s = %p", name, structure);
292
293    PropertyOffset offset = structure->getConcurrently(exec->vm(), ident.impl());
294    if (offset != invalidOffset)
295        out.printf(" (offset = %d)", offset);
296}
297
298#if ENABLE(JIT) // unused when not ENABLE(JIT), leading to silly warnings
299static void dumpChain(PrintStream& out, ExecState* exec, StructureChain* chain, const Identifier& ident)
300{
301    out.printf("chain = %p: [", chain);
302    bool first = true;
303    for (WriteBarrier<Structure>* currentStructure = chain->head();
304         *currentStructure;
305         ++currentStructure) {
306        if (first)
307            first = false;
308        else
309            out.printf(", ");
310        dumpStructure(out, "struct", exec, currentStructure->get(), ident);
311    }
312    out.printf("]");
313}
314#endif
315
316void CodeBlock::printGetByIdCacheStatus(PrintStream& out, ExecState* exec, int location, const StubInfoMap& map)
317{
318    Instruction* instruction = instructions().begin() + location;
319
320    const Identifier& ident = identifier(instruction[3].u.operand);
321
322    UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations.
323
324    if (exec->interpreter()->getOpcodeID(instruction[0].u.opcode) == op_get_array_length)
325        out.printf(" llint(array_length)");
326    else if (Structure* structure = instruction[4].u.structure.get()) {
327        out.printf(" llint(");
328        dumpStructure(out, "struct", exec, structure, ident);
329        out.printf(")");
330    }
331
332#if ENABLE(JIT)
333    if (StructureStubInfo* stubPtr = map.get(CodeOrigin(location))) {
334        StructureStubInfo& stubInfo = *stubPtr;
335        if (stubInfo.resetByGC)
336            out.print(" (Reset By GC)");
337
338        if (stubInfo.seen) {
339            out.printf(" jit(");
340
341            Structure* baseStructure = 0;
342            Structure* prototypeStructure = 0;
343            StructureChain* chain = 0;
344            PolymorphicGetByIdList* list = 0;
345
346            switch (stubInfo.accessType) {
347            case access_get_by_id_self:
348                out.printf("self");
349                baseStructure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
350                break;
351            case access_get_by_id_chain:
352                out.printf("chain");
353                baseStructure = stubInfo.u.getByIdChain.baseObjectStructure.get();
354                chain = stubInfo.u.getByIdChain.chain.get();
355                break;
356            case access_get_by_id_list:
357                out.printf("list");
358                list = stubInfo.u.getByIdList.list;
359                break;
360            case access_unset:
361                out.printf("unset");
362                break;
363            default:
364                RELEASE_ASSERT_NOT_REACHED();
365                break;
366            }
367
368            if (baseStructure) {
369                out.printf(", ");
370                dumpStructure(out, "struct", exec, baseStructure, ident);
371            }
372
373            if (prototypeStructure) {
374                out.printf(", ");
375                dumpStructure(out, "prototypeStruct", exec, baseStructure, ident);
376            }
377
378            if (chain) {
379                out.printf(", ");
380                dumpChain(out, exec, chain, ident);
381            }
382
383            if (list) {
384                out.printf(", list = %p: [", list);
385                for (unsigned i = 0; i < list->size(); ++i) {
386                    if (i)
387                        out.printf(", ");
388                    out.printf("(");
389                    dumpStructure(out, "base", exec, list->at(i).structure(), ident);
390                    if (list->at(i).chain()) {
391                        out.printf(", ");
392                        dumpChain(out, exec, list->at(i).chain(), ident);
393                    }
394                    out.printf(")");
395                }
396                out.printf("]");
397            }
398            out.printf(")");
399        }
400    }
401#else
402    UNUSED_PARAM(map);
403#endif
404}
405
void CodeBlock::printCallOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const CallLinkInfoMap& map)
{
    // Decode and print a call-family opcode with the operand shape:
    // op dst, func, argCount, registerOffset, then optionally the LLInt/JIT
    // call-cache state, followed by array and value profiling.
    int dst = (++it)->u.operand;
    int func = (++it)->u.operand;
    int argCount = (++it)->u.operand;
    int registerOffset = (++it)->u.operand;
    printLocationAndOp(out, exec, location, it, op);
    out.printf("%s, %s, %d, %d", registerName(dst).data(), registerName(func).data(), argCount, registerOffset);
    if (cacheDumpMode == DumpCaches) {
        // LLInt-level cache: the last callee this call site saw.
        LLIntCallLinkInfo* callLinkInfo = it[1].u.callLinkInfo;
        if (callLinkInfo->lastSeenCallee) {
            out.printf(
                " llint(%p, exec %p)",
                callLinkInfo->lastSeenCallee.get(),
                callLinkInfo->lastSeenCallee->executable());
        }
#if ENABLE(JIT)
        // JIT-level cache, when a CallLinkInfo exists for this code origin.
        if (CallLinkInfo* info = map.get(CodeOrigin(location))) {
            JSFunction* target = info->lastSeenCallee.get();
            if (target)
                out.printf(" jit(%p, exec %p)", target, target->executable());
        }
        out.print(" status(", CallLinkStatus::computeFor(this, location, map), ")");
#else
        UNUSED_PARAM(map);
#endif
    }
    // Skip the two remaining operand slots, then dump the profiles.
    ++it;
    ++it;
    dumpArrayProfiling(out, it, hasPrintedProfiling);
    dumpValueProfiling(out, it, hasPrintedProfiling);
}
438
439void CodeBlock::printPutByIdOp(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op)
440{
441    int r0 = (++it)->u.operand;
442    int id0 = (++it)->u.operand;
443    int r1 = (++it)->u.operand;
444    printLocationAndOp(out, exec, location, it, op);
445    out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data());
446    it += 5;
447}
448
void CodeBlock::dumpBytecode(PrintStream& out)
{
    // Dump this block's full bytecode listing plus its side tables
    // (identifiers, constants, regexps, exception handlers, jump tables).
    // We only use the ExecState* for things that don't actually lead to JS execution,
    // like converting a JSString to a String. Hence the globalExec is appropriate.
    ExecState* exec = m_globalObject->globalExec();

    // Count opcodes (not instruction slots) by stepping per-opcode lengths.
    size_t instructionCount = 0;

    for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
        ++instructionCount;

    // Header: sizes, parameter/register/variable counts, and optional
    // captured-variable, arguments, and activation register info.
    out.print(*this);
    out.printf(
        ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)",
        static_cast<unsigned long>(instructions().size()),
        static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
        m_numParameters, m_numCalleeRegisters, m_numVars);
    if (symbolTable() && symbolTable()->captureCount()) {
        out.printf(
            "; %d captured var(s) (from r%d to r%d, inclusive)",
            symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() + 1);
    }
    if (usesArguments()) {
        out.printf(
            "; uses arguments, in r%d, r%d",
            argumentsRegister().offset(),
            unmodifiedArgumentsRegister(argumentsRegister()).offset());
    }
    if (needsActivation() && codeType() == FunctionCode)
        out.printf("; activation in r%d", activationRegister().offset());
    out.printf("\n");

    // Gather JIT inline-cache state so per-instruction dumps can show it.
    StubInfoMap stubInfos;
    CallLinkInfoMap callLinkInfos;
    getStubInfoMap(stubInfos);
    getCallLinkInfoMap(callLinkInfos);

    // One line per bytecode instruction. The per-instruction dumpBytecode
    // overload advances `it` past each instruction's operands.
    const Instruction* begin = instructions().begin();
    const Instruction* end = instructions().end();
    for (const Instruction* it = begin; it != end; ++it)
        dumpBytecode(out, exec, begin, it, stubInfos, callLinkInfos);

    // Side tables. Each section is only printed when non-empty.
    if (numberOfIdentifiers()) {
        out.printf("\nIdentifiers:\n");
        size_t i = 0;
        do {
            out.printf("  id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());
            ++i;
        } while (i != numberOfIdentifiers());
    }

    if (!m_constantRegisters.isEmpty()) {
        out.printf("\nConstants:\n");
        size_t i = 0;
        do {
            out.printf("   k%u = %s\n", static_cast<unsigned>(i), toCString(m_constantRegisters[i].get()).data());
            ++i;
        } while (i < m_constantRegisters.size());
    }

    if (size_t count = m_unlinkedCode->numberOfRegExps()) {
        out.printf("\nm_regexps:\n");
        size_t i = 0;
        do {
            out.printf("  re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_unlinkedCode->regexp(i)).data());
            ++i;
        } while (i < count);
    }

    if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
        out.printf("\nException Handlers:\n");
        unsigned i = 0;
        do {
            out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] depth: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target, m_rareData->m_exceptionHandlers[i].scopeDepth);
            ++i;
        } while (i < m_rareData->m_exceptionHandlers.size());
    }

    if (m_rareData && !m_rareData->m_switchJumpTables.isEmpty()) {
        out.printf("Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf("  %1d = {\n", i);
            int entry = 0;
            Vector<int32_t>::const_iterator end = m_rareData->m_switchJumpTables[i].branchOffsets.end();
            for (Vector<int32_t>::const_iterator iter = m_rareData->m_switchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
                // A zero branch offset means the entry is absent from the table.
                if (!*iter)
                    continue;
                out.printf("\t\t%4d => %04d\n", entry + m_rareData->m_switchJumpTables[i].min, *iter);
            }
            out.printf("      }\n");
            ++i;
        } while (i < m_rareData->m_switchJumpTables.size());
    }

    if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
        out.printf("\nString Switch Jump Tables:\n");
        unsigned i = 0;
        do {
            out.printf("  %1d = {\n", i);
            StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
            for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
                out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);
            out.printf("      }\n");
            ++i;
        } while (i < m_rareData->m_stringSwitchJumpTables.size());
    }

    out.printf("\n");
}
559
560void CodeBlock::beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)
561{
562    if (hasPrintedProfiling) {
563        out.print("; ");
564        return;
565    }
566
567    out.print("    ");
568    hasPrintedProfiling = true;
569}
570
571void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
572{
573    ConcurrentJITLocker locker(m_lock);
574
575    ++it;
576    CString description = it->u.profile->briefDescription(locker);
577    if (!description.length())
578        return;
579    beginDumpProfiling(out, hasPrintedProfiling);
580    out.print(description);
581}
582
583void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
584{
585    ConcurrentJITLocker locker(m_lock);
586
587    ++it;
588    if (!it->u.arrayProfile)
589        return;
590    CString description = it->u.arrayProfile->briefDescription(locker, this);
591    if (!description.length())
592        return;
593    beginDumpProfiling(out, hasPrintedProfiling);
594    out.print(description);
595}
596
597void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
598{
599    if (!profile || !profile->m_counter)
600        return;
601
602    beginDumpProfiling(out, hasPrintedProfiling);
603    out.print(name, profile->m_counter);
604}
605
606void CodeBlock::printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
607{
608    out.printf("[%4d] %-17s ", location, op);
609}
610
611void CodeBlock::printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
612{
613    printLocationAndOp(out, exec, location, it, op);
614    out.printf("%s", registerName(operand).data());
615}
616
617void CodeBlock::dumpBytecode(
618    PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it,
619    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
620{
621    int location = it - begin;
622    bool hasPrintedProfiling = false;
623    OpcodeID opcode = exec->interpreter()->getOpcodeID(it->u.opcode);
624    switch (opcode) {
625        case op_enter: {
626            printLocationAndOp(out, exec, location, it, "enter");
627            break;
628        }
629        case op_touch_entry: {
630            printLocationAndOp(out, exec, location, it, "touch_entry");
631            break;
632        }
633        case op_create_activation: {
634            int r0 = (++it)->u.operand;
635            printLocationOpAndRegisterOperand(out, exec, location, it, "create_activation", r0);
636            break;
637        }
638        case op_create_arguments: {
639            int r0 = (++it)->u.operand;
640            printLocationOpAndRegisterOperand(out, exec, location, it, "create_arguments", r0);
641            break;
642        }
643        case op_init_lazy_reg: {
644            int r0 = (++it)->u.operand;
645            printLocationOpAndRegisterOperand(out, exec, location, it, "init_lazy_reg", r0);
646            break;
647        }
648        case op_get_callee: {
649            int r0 = (++it)->u.operand;
650            printLocationOpAndRegisterOperand(out, exec, location, it, "get_callee", r0);
651            ++it;
652            break;
653        }
654        case op_create_this: {
655            int r0 = (++it)->u.operand;
656            int r1 = (++it)->u.operand;
657            unsigned inferredInlineCapacity = (++it)->u.operand;
658            printLocationAndOp(out, exec, location, it, "create_this");
659            out.printf("%s, %s, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity);
660            break;
661        }
662        case op_to_this: {
663            int r0 = (++it)->u.operand;
664            printLocationOpAndRegisterOperand(out, exec, location, it, "to_this", r0);
665            Structure* structure = (++it)->u.structure.get();
666            if (structure)
667                out.print(" cache(struct = ", RawPointer(structure), ")");
668            break;
669        }
670        case op_new_object: {
671            int r0 = (++it)->u.operand;
672            unsigned inferredInlineCapacity = (++it)->u.operand;
673            printLocationAndOp(out, exec, location, it, "new_object");
674            out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity);
675            ++it; // Skip object allocation profile.
676            break;
677        }
678        case op_new_array: {
679            int dst = (++it)->u.operand;
680            int argv = (++it)->u.operand;
681            int argc = (++it)->u.operand;
682            printLocationAndOp(out, exec, location, it, "new_array");
683            out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc);
684            ++it; // Skip array allocation profile.
685            break;
686        }
687        case op_new_array_with_size: {
688            int dst = (++it)->u.operand;
689            int length = (++it)->u.operand;
690            printLocationAndOp(out, exec, location, it, "new_array_with_size");
691            out.printf("%s, %s", registerName(dst).data(), registerName(length).data());
692            ++it; // Skip array allocation profile.
693            break;
694        }
695        case op_new_array_buffer: {
696            int dst = (++it)->u.operand;
697            int argv = (++it)->u.operand;
698            int argc = (++it)->u.operand;
699            printLocationAndOp(out, exec, location, it, "new_array_buffer");
700            out.printf("%s, %d, %d", registerName(dst).data(), argv, argc);
701            ++it; // Skip array allocation profile.
702            break;
703        }
704        case op_new_regexp: {
705            int r0 = (++it)->u.operand;
706            int re0 = (++it)->u.operand;
707            printLocationAndOp(out, exec, location, it, "new_regexp");
708            out.printf("%s, ", registerName(r0).data());
709            if (r0 >=0 && r0 < (int)m_unlinkedCode->numberOfRegExps())
710                out.printf("%s", regexpName(re0, regexp(re0)).data());
711            else
712                out.printf("bad_regexp(%d)", re0);
713            break;
714        }
715        case op_mov: {
716            int r0 = (++it)->u.operand;
717            int r1 = (++it)->u.operand;
718            printLocationAndOp(out, exec, location, it, "mov");
719            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
720            break;
721        }
722        case op_captured_mov: {
723            int r0 = (++it)->u.operand;
724            int r1 = (++it)->u.operand;
725            printLocationAndOp(out, exec, location, it, "captured_mov");
726            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
727            ++it;
728            break;
729        }
730        case op_not: {
731            printUnaryOp(out, exec, location, it, "not");
732            break;
733        }
734        case op_eq: {
735            printBinaryOp(out, exec, location, it, "eq");
736            break;
737        }
738        case op_eq_null: {
739            printUnaryOp(out, exec, location, it, "eq_null");
740            break;
741        }
742        case op_neq: {
743            printBinaryOp(out, exec, location, it, "neq");
744            break;
745        }
746        case op_neq_null: {
747            printUnaryOp(out, exec, location, it, "neq_null");
748            break;
749        }
750        case op_stricteq: {
751            printBinaryOp(out, exec, location, it, "stricteq");
752            break;
753        }
754        case op_nstricteq: {
755            printBinaryOp(out, exec, location, it, "nstricteq");
756            break;
757        }
758        case op_less: {
759            printBinaryOp(out, exec, location, it, "less");
760            break;
761        }
762        case op_lesseq: {
763            printBinaryOp(out, exec, location, it, "lesseq");
764            break;
765        }
766        case op_greater: {
767            printBinaryOp(out, exec, location, it, "greater");
768            break;
769        }
770        case op_greatereq: {
771            printBinaryOp(out, exec, location, it, "greatereq");
772            break;
773        }
774        case op_inc: {
775            int r0 = (++it)->u.operand;
776            printLocationOpAndRegisterOperand(out, exec, location, it, "inc", r0);
777            break;
778        }
779        case op_dec: {
780            int r0 = (++it)->u.operand;
781            printLocationOpAndRegisterOperand(out, exec, location, it, "dec", r0);
782            break;
783        }
784        case op_to_number: {
785            printUnaryOp(out, exec, location, it, "to_number");
786            break;
787        }
788        case op_negate: {
789            printUnaryOp(out, exec, location, it, "negate");
790            break;
791        }
792        case op_add: {
793            printBinaryOp(out, exec, location, it, "add");
794            ++it;
795            break;
796        }
797        case op_mul: {
798            printBinaryOp(out, exec, location, it, "mul");
799            ++it;
800            break;
801        }
802        case op_div: {
803            printBinaryOp(out, exec, location, it, "div");
804            ++it;
805            break;
806        }
807        case op_mod: {
808            printBinaryOp(out, exec, location, it, "mod");
809            break;
810        }
811        case op_sub: {
812            printBinaryOp(out, exec, location, it, "sub");
813            ++it;
814            break;
815        }
816        case op_lshift: {
817            printBinaryOp(out, exec, location, it, "lshift");
818            break;
819        }
820        case op_rshift: {
821            printBinaryOp(out, exec, location, it, "rshift");
822            break;
823        }
824        case op_urshift: {
825            printBinaryOp(out, exec, location, it, "urshift");
826            break;
827        }
828        case op_bitand: {
829            printBinaryOp(out, exec, location, it, "bitand");
830            ++it;
831            break;
832        }
833        case op_bitxor: {
834            printBinaryOp(out, exec, location, it, "bitxor");
835            ++it;
836            break;
837        }
838        case op_bitor: {
839            printBinaryOp(out, exec, location, it, "bitor");
840            ++it;
841            break;
842        }
843        case op_check_has_instance: {
844            int r0 = (++it)->u.operand;
845            int r1 = (++it)->u.operand;
846            int r2 = (++it)->u.operand;
847            int offset = (++it)->u.operand;
848            printLocationAndOp(out, exec, location, it, "check_has_instance");
849            out.printf("%s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), offset, location + offset);
850            break;
851        }
852        case op_instanceof: {
853            int r0 = (++it)->u.operand;
854            int r1 = (++it)->u.operand;
855            int r2 = (++it)->u.operand;
856            printLocationAndOp(out, exec, location, it, "instanceof");
857            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
858            break;
859        }
860        case op_unsigned: {
861            printUnaryOp(out, exec, location, it, "unsigned");
862            break;
863        }
864        case op_typeof: {
865            printUnaryOp(out, exec, location, it, "typeof");
866            break;
867        }
868        case op_is_undefined: {
869            printUnaryOp(out, exec, location, it, "is_undefined");
870            break;
871        }
872        case op_is_boolean: {
873            printUnaryOp(out, exec, location, it, "is_boolean");
874            break;
875        }
876        case op_is_number: {
877            printUnaryOp(out, exec, location, it, "is_number");
878            break;
879        }
880        case op_is_string: {
881            printUnaryOp(out, exec, location, it, "is_string");
882            break;
883        }
884        case op_is_object: {
885            printUnaryOp(out, exec, location, it, "is_object");
886            break;
887        }
888        case op_is_function: {
889            printUnaryOp(out, exec, location, it, "is_function");
890            break;
891        }
892        case op_in: {
893            printBinaryOp(out, exec, location, it, "in");
894            break;
895        }
896        case op_init_global_const_nop: {
897            printLocationAndOp(out, exec, location, it, "init_global_const_nop");
898            it++;
899            it++;
900            it++;
901            it++;
902            break;
903        }
904        case op_init_global_const: {
905            WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer;
906            int r0 = (++it)->u.operand;
907            printLocationAndOp(out, exec, location, it, "init_global_const");
908            out.printf("g%d(%p), %s", m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(r0).data());
909            it++;
910            it++;
911            break;
912        }
913        case op_get_by_id:
914        case op_get_by_id_out_of_line:
915        case op_get_array_length: {
916            printGetByIdOp(out, exec, location, it);
917            printGetByIdCacheStatus(out, exec, location, stubInfos);
918            dumpValueProfiling(out, it, hasPrintedProfiling);
919            break;
920        }
921        case op_get_arguments_length: {
922            printUnaryOp(out, exec, location, it, "get_arguments_length");
923            it++;
924            break;
925        }
926        case op_put_by_id: {
927            printPutByIdOp(out, exec, location, it, "put_by_id");
928            break;
929        }
930        case op_put_by_id_out_of_line: {
931            printPutByIdOp(out, exec, location, it, "put_by_id_out_of_line");
932            break;
933        }
934        case op_put_by_id_transition_direct: {
935            printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct");
936            break;
937        }
938        case op_put_by_id_transition_direct_out_of_line: {
939            printPutByIdOp(out, exec, location, it, "put_by_id_transition_direct_out_of_line");
940            break;
941        }
942        case op_put_by_id_transition_normal: {
943            printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal");
944            break;
945        }
946        case op_put_by_id_transition_normal_out_of_line: {
947            printPutByIdOp(out, exec, location, it, "put_by_id_transition_normal_out_of_line");
948            break;
949        }
950        case op_put_getter_setter: {
951            int r0 = (++it)->u.operand;
952            int id0 = (++it)->u.operand;
953            int r1 = (++it)->u.operand;
954            int r2 = (++it)->u.operand;
955            printLocationAndOp(out, exec, location, it, "put_getter_setter");
956            out.printf("%s, %s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(), registerName(r2).data());
957            break;
958        }
959        case op_del_by_id: {
960            int r0 = (++it)->u.operand;
961            int r1 = (++it)->u.operand;
962            int id0 = (++it)->u.operand;
963            printLocationAndOp(out, exec, location, it, "del_by_id");
964            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data());
965            break;
966        }
967        case op_get_by_val: {
968            int r0 = (++it)->u.operand;
969            int r1 = (++it)->u.operand;
970            int r2 = (++it)->u.operand;
971            printLocationAndOp(out, exec, location, it, "get_by_val");
972            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
973            dumpArrayProfiling(out, it, hasPrintedProfiling);
974            dumpValueProfiling(out, it, hasPrintedProfiling);
975            break;
976        }
977        case op_get_argument_by_val: {
978            int r0 = (++it)->u.operand;
979            int r1 = (++it)->u.operand;
980            int r2 = (++it)->u.operand;
981            printLocationAndOp(out, exec, location, it, "get_argument_by_val");
982            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
983            ++it;
984            dumpValueProfiling(out, it, hasPrintedProfiling);
985            break;
986        }
987        case op_get_by_pname: {
988            int r0 = (++it)->u.operand;
989            int r1 = (++it)->u.operand;
990            int r2 = (++it)->u.operand;
991            int r3 = (++it)->u.operand;
992            int r4 = (++it)->u.operand;
993            int r5 = (++it)->u.operand;
994            printLocationAndOp(out, exec, location, it, "get_by_pname");
995            out.printf("%s, %s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data(), registerName(r5).data());
996            break;
997        }
998        case op_put_by_val: {
999            int r0 = (++it)->u.operand;
1000            int r1 = (++it)->u.operand;
1001            int r2 = (++it)->u.operand;
1002            printLocationAndOp(out, exec, location, it, "put_by_val");
1003            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1004            dumpArrayProfiling(out, it, hasPrintedProfiling);
1005            break;
1006        }
1007        case op_put_by_val_direct: {
1008            int r0 = (++it)->u.operand;
1009            int r1 = (++it)->u.operand;
1010            int r2 = (++it)->u.operand;
1011            printLocationAndOp(out, exec, location, it, "put_by_val_direct");
1012            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1013            dumpArrayProfiling(out, it, hasPrintedProfiling);
1014            break;
1015        }
1016        case op_del_by_val: {
1017            int r0 = (++it)->u.operand;
1018            int r1 = (++it)->u.operand;
1019            int r2 = (++it)->u.operand;
1020            printLocationAndOp(out, exec, location, it, "del_by_val");
1021            out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data());
1022            break;
1023        }
1024        case op_put_by_index: {
1025            int r0 = (++it)->u.operand;
1026            unsigned n0 = (++it)->u.operand;
1027            int r1 = (++it)->u.operand;
1028            printLocationAndOp(out, exec, location, it, "put_by_index");
1029            out.printf("%s, %u, %s", registerName(r0).data(), n0, registerName(r1).data());
1030            break;
1031        }
1032        case op_jmp: {
1033            int offset = (++it)->u.operand;
1034            printLocationAndOp(out, exec, location, it, "jmp");
1035            out.printf("%d(->%d)", offset, location + offset);
1036            break;
1037        }
1038        case op_jtrue: {
1039            printConditionalJump(out, exec, begin, it, location, "jtrue");
1040            break;
1041        }
1042        case op_jfalse: {
1043            printConditionalJump(out, exec, begin, it, location, "jfalse");
1044            break;
1045        }
1046        case op_jeq_null: {
1047            printConditionalJump(out, exec, begin, it, location, "jeq_null");
1048            break;
1049        }
1050        case op_jneq_null: {
1051            printConditionalJump(out, exec, begin, it, location, "jneq_null");
1052            break;
1053        }
1054        case op_jneq_ptr: {
1055            int r0 = (++it)->u.operand;
1056            Special::Pointer pointer = (++it)->u.specialPointer;
1057            int offset = (++it)->u.operand;
1058            printLocationAndOp(out, exec, location, it, "jneq_ptr");
1059            out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, m_globalObject->actualPointerFor(pointer), offset, location + offset);
1060            break;
1061        }
1062        case op_jless: {
1063            int r0 = (++it)->u.operand;
1064            int r1 = (++it)->u.operand;
1065            int offset = (++it)->u.operand;
1066            printLocationAndOp(out, exec, location, it, "jless");
1067            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1068            break;
1069        }
1070        case op_jlesseq: {
1071            int r0 = (++it)->u.operand;
1072            int r1 = (++it)->u.operand;
1073            int offset = (++it)->u.operand;
1074            printLocationAndOp(out, exec, location, it, "jlesseq");
1075            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1076            break;
1077        }
1078        case op_jgreater: {
1079            int r0 = (++it)->u.operand;
1080            int r1 = (++it)->u.operand;
1081            int offset = (++it)->u.operand;
1082            printLocationAndOp(out, exec, location, it, "jgreater");
1083            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1084            break;
1085        }
1086        case op_jgreatereq: {
1087            int r0 = (++it)->u.operand;
1088            int r1 = (++it)->u.operand;
1089            int offset = (++it)->u.operand;
1090            printLocationAndOp(out, exec, location, it, "jgreatereq");
1091            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1092            break;
1093        }
1094        case op_jnless: {
1095            int r0 = (++it)->u.operand;
1096            int r1 = (++it)->u.operand;
1097            int offset = (++it)->u.operand;
1098            printLocationAndOp(out, exec, location, it, "jnless");
1099            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1100            break;
1101        }
1102        case op_jnlesseq: {
1103            int r0 = (++it)->u.operand;
1104            int r1 = (++it)->u.operand;
1105            int offset = (++it)->u.operand;
1106            printLocationAndOp(out, exec, location, it, "jnlesseq");
1107            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1108            break;
1109        }
1110        case op_jngreater: {
1111            int r0 = (++it)->u.operand;
1112            int r1 = (++it)->u.operand;
1113            int offset = (++it)->u.operand;
1114            printLocationAndOp(out, exec, location, it, "jngreater");
1115            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1116            break;
1117        }
1118        case op_jngreatereq: {
1119            int r0 = (++it)->u.operand;
1120            int r1 = (++it)->u.operand;
1121            int offset = (++it)->u.operand;
1122            printLocationAndOp(out, exec, location, it, "jngreatereq");
1123            out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset);
1124            break;
1125        }
1126        case op_loop_hint: {
1127            printLocationAndOp(out, exec, location, it, "loop_hint");
1128            break;
1129        }
1130        case op_switch_imm: {
1131            int tableIndex = (++it)->u.operand;
1132            int defaultTarget = (++it)->u.operand;
1133            int scrutineeRegister = (++it)->u.operand;
1134            printLocationAndOp(out, exec, location, it, "switch_imm");
1135            out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1136            break;
1137        }
1138        case op_switch_char: {
1139            int tableIndex = (++it)->u.operand;
1140            int defaultTarget = (++it)->u.operand;
1141            int scrutineeRegister = (++it)->u.operand;
1142            printLocationAndOp(out, exec, location, it, "switch_char");
1143            out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1144            break;
1145        }
1146        case op_switch_string: {
1147            int tableIndex = (++it)->u.operand;
1148            int defaultTarget = (++it)->u.operand;
1149            int scrutineeRegister = (++it)->u.operand;
1150            printLocationAndOp(out, exec, location, it, "switch_string");
1151            out.printf("%d, %d(->%d), %s", tableIndex, defaultTarget, location + defaultTarget, registerName(scrutineeRegister).data());
1152            break;
1153        }
1154        case op_new_func: {
1155            int r0 = (++it)->u.operand;
1156            int f0 = (++it)->u.operand;
1157            int shouldCheck = (++it)->u.operand;
1158            printLocationAndOp(out, exec, location, it, "new_func");
1159            out.printf("%s, f%d, %s", registerName(r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
1160            break;
1161        }
1162        case op_new_captured_func: {
1163            int r0 = (++it)->u.operand;
1164            int f0 = (++it)->u.operand;
1165            printLocationAndOp(out, exec, location, it, "new_captured_func");
1166            out.printf("%s, f%d", registerName(r0).data(), f0);
1167            ++it;
1168            break;
1169        }
1170        case op_new_func_exp: {
1171            int r0 = (++it)->u.operand;
1172            int f0 = (++it)->u.operand;
1173            printLocationAndOp(out, exec, location, it, "new_func_exp");
1174            out.printf("%s, f%d", registerName(r0).data(), f0);
1175            break;
1176        }
1177        case op_call: {
1178            printCallOp(out, exec, location, it, "call", DumpCaches, hasPrintedProfiling, callLinkInfos);
1179            break;
1180        }
1181        case op_call_eval: {
1182            printCallOp(out, exec, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, callLinkInfos);
1183            break;
1184        }
1185
1186        case op_construct_varargs:
1187        case op_call_varargs: {
1188            int result = (++it)->u.operand;
1189            int callee = (++it)->u.operand;
1190            int thisValue = (++it)->u.operand;
1191            int arguments = (++it)->u.operand;
1192            int firstFreeRegister = (++it)->u.operand;
1193            int varArgOffset = (++it)->u.operand;
1194            ++it;
1195            printLocationAndOp(out, exec, location, it, opcode == op_call_varargs ? "call_varargs" : "construct_varargs");
1196            out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset);
1197            dumpValueProfiling(out, it, hasPrintedProfiling);
1198            break;
1199        }
1200
1201        case op_tear_off_activation: {
1202            int r0 = (++it)->u.operand;
1203            printLocationOpAndRegisterOperand(out, exec, location, it, "tear_off_activation", r0);
1204            break;
1205        }
1206        case op_tear_off_arguments: {
1207            int r0 = (++it)->u.operand;
1208            int r1 = (++it)->u.operand;
1209            printLocationAndOp(out, exec, location, it, "tear_off_arguments");
1210            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1211            break;
1212        }
1213        case op_ret: {
1214            int r0 = (++it)->u.operand;
1215            printLocationOpAndRegisterOperand(out, exec, location, it, "ret", r0);
1216            break;
1217        }
1218        case op_ret_object_or_this: {
1219            int r0 = (++it)->u.operand;
1220            int r1 = (++it)->u.operand;
1221            printLocationAndOp(out, exec, location, it, "constructor_ret");
1222            out.printf("%s %s", registerName(r0).data(), registerName(r1).data());
1223            break;
1224        }
1225        case op_construct: {
1226            printCallOp(out, exec, location, it, "construct", DumpCaches, hasPrintedProfiling, callLinkInfos);
1227            break;
1228        }
1229        case op_strcat: {
1230            int r0 = (++it)->u.operand;
1231            int r1 = (++it)->u.operand;
1232            int count = (++it)->u.operand;
1233            printLocationAndOp(out, exec, location, it, "strcat");
1234            out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count);
1235            break;
1236        }
1237        case op_to_primitive: {
1238            int r0 = (++it)->u.operand;
1239            int r1 = (++it)->u.operand;
1240            printLocationAndOp(out, exec, location, it, "to_primitive");
1241            out.printf("%s, %s", registerName(r0).data(), registerName(r1).data());
1242            break;
1243        }
1244        case op_get_pnames: {
1245            int r0 = it[1].u.operand;
1246            int r1 = it[2].u.operand;
1247            int r2 = it[3].u.operand;
1248            int r3 = it[4].u.operand;
1249            int offset = it[5].u.operand;
1250            printLocationAndOp(out, exec, location, it, "get_pnames");
1251            out.printf("%s, %s, %s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), offset, location + offset);
1252            it += OPCODE_LENGTH(op_get_pnames) - 1;
1253            break;
1254        }
1255        case op_next_pname: {
1256            int dest = it[1].u.operand;
1257            int base = it[2].u.operand;
1258            int i = it[3].u.operand;
1259            int size = it[4].u.operand;
1260            int iter = it[5].u.operand;
1261            int offset = it[6].u.operand;
1262            printLocationAndOp(out, exec, location, it, "next_pname");
1263            out.printf("%s, %s, %s, %s, %s, %d(->%d)", registerName(dest).data(), registerName(base).data(), registerName(i).data(), registerName(size).data(), registerName(iter).data(), offset, location + offset);
1264            it += OPCODE_LENGTH(op_next_pname) - 1;
1265            break;
1266        }
1267        case op_push_with_scope: {
1268            int r0 = (++it)->u.operand;
1269            printLocationOpAndRegisterOperand(out, exec, location, it, "push_with_scope", r0);
1270            break;
1271        }
1272        case op_pop_scope: {
1273            printLocationAndOp(out, exec, location, it, "pop_scope");
1274            break;
1275        }
1276        case op_push_name_scope: {
1277            int id0 = (++it)->u.operand;
1278            int r1 = (++it)->u.operand;
1279            unsigned attributes = (++it)->u.operand;
1280            printLocationAndOp(out, exec, location, it, "push_name_scope");
1281            out.printf("%s, %s, %u", idName(id0, identifier(id0)).data(), registerName(r1).data(), attributes);
1282            break;
1283        }
1284        case op_catch: {
1285            int r0 = (++it)->u.operand;
1286            printLocationOpAndRegisterOperand(out, exec, location, it, "catch", r0);
1287            break;
1288        }
1289        case op_throw: {
1290            int r0 = (++it)->u.operand;
1291            printLocationOpAndRegisterOperand(out, exec, location, it, "throw", r0);
1292            break;
1293        }
1294        case op_throw_static_error: {
1295            int k0 = (++it)->u.operand;
1296            int k1 = (++it)->u.operand;
1297            printLocationAndOp(out, exec, location, it, "throw_static_error");
1298            out.printf("%s, %s", constantName(k0, getConstant(k0)).data(), k1 ? "true" : "false");
1299            break;
1300        }
1301        case op_debug: {
1302            int debugHookID = (++it)->u.operand;
1303            int hasBreakpointFlag = (++it)->u.operand;
1304            printLocationAndOp(out, exec, location, it, "debug");
1305            out.printf("%s %d", debugHookName(debugHookID), hasBreakpointFlag);
1306            break;
1307        }
1308        case op_profile_will_call: {
1309            int function = (++it)->u.operand;
1310            printLocationOpAndRegisterOperand(out, exec, location, it, "profile_will_call", function);
1311            break;
1312        }
1313        case op_profile_did_call: {
1314            int function = (++it)->u.operand;
1315            printLocationOpAndRegisterOperand(out, exec, location, it, "profile_did_call", function);
1316            break;
1317        }
1318        case op_end: {
1319            int r0 = (++it)->u.operand;
1320            printLocationOpAndRegisterOperand(out, exec, location, it, "end", r0);
1321            break;
1322        }
1323        case op_resolve_scope: {
1324            int r0 = (++it)->u.operand;
1325            int id0 = (++it)->u.operand;
1326            ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1327            int depth = (++it)->u.operand;
1328            printLocationAndOp(out, exec, location, it, "resolve_scope");
1329            out.printf("%s, %s, %u<%s|%s>, %d", registerName(r0).data(), idName(id0, identifier(id0)).data(),
1330                modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1331                depth);
1332            ++it;
1333            break;
1334        }
1335        case op_get_from_scope: {
1336            int r0 = (++it)->u.operand;
1337            int r1 = (++it)->u.operand;
1338            int id0 = (++it)->u.operand;
1339            ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1340            ++it; // Structure
1341            int operand = (++it)->u.operand; // Operand
1342            ++it; // Skip value profile.
1343            printLocationAndOp(out, exec, location, it, "get_from_scope");
1344            out.printf("%s, %s, %s, %u<%s|%s>, <structure>, %d",
1345                registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(),
1346                modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1347                operand);
1348            break;
1349        }
1350        case op_put_to_scope: {
1351            int r0 = (++it)->u.operand;
1352            int id0 = (++it)->u.operand;
1353            int r1 = (++it)->u.operand;
1354            ResolveModeAndType modeAndType = ResolveModeAndType((++it)->u.operand);
1355            ++it; // Structure
1356            int operand = (++it)->u.operand; // Operand
1357            printLocationAndOp(out, exec, location, it, "put_to_scope");
1358            out.printf("%s, %s, %s, %u<%s|%s>, <structure>, %d",
1359                registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data(),
1360                modeAndType.operand(), resolveModeName(modeAndType.mode()), resolveTypeName(modeAndType.type()),
1361                operand);
1362            break;
1363        }
1364        default:
1365            RELEASE_ASSERT_NOT_REACHED();
1366    }
1367
1368    dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1369    dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
1370
1371#if ENABLE(DFG_JIT)
1372    Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
1373    if (!exitSites.isEmpty()) {
1374        out.print(" !! frequent exits: ");
1375        CommaPrinter comma;
1376        for (unsigned i = 0; i < exitSites.size(); ++i)
1377            out.print(comma, exitSites[i].kind(), " ", exitSites[i].jitType());
1378    }
1379#else // ENABLE(DFG_JIT)
1380    UNUSED_PARAM(location);
1381#endif // ENABLE(DFG_JIT)
1382    out.print("\n");
1383}
1384
1385void CodeBlock::dumpBytecode(
1386    PrintStream& out, unsigned bytecodeOffset,
1387    const StubInfoMap& stubInfos, const CallLinkInfoMap& callLinkInfos)
1388{
1389    ExecState* exec = m_globalObject->globalExec();
1390    const Instruction* it = instructions().begin() + bytecodeOffset;
1391    dumpBytecode(out, exec, instructions().begin(), it, stubInfos, callLinkInfos);
1392}
1393
// X-macro: applies |macro| to the name of each vector-like member held
// directly by CodeBlock. NOTE(review): no callers are visible in this chunk —
// presumably used for generic per-member processing (e.g. size accounting);
// confirm at the use sites.
#define FOR_EACH_MEMBER_VECTOR(macro) \
    macro(instructions) \
    macro(callLinkInfos) \
    macro(linkedCallerList) \
    macro(identifiers) \
    macro(functionExpressions) \
    macro(constantRegisters)

// Same idea as above, but for the vectors that live on CodeBlock's lazily
// allocated RareData rather than on CodeBlock itself.
#define FOR_EACH_MEMBER_VECTOR_RARE_DATA(macro) \
    macro(regexps) \
    macro(functions) \
    macro(exceptionHandlers) \
    macro(switchJumpTables) \
    macro(stringSwitchJumpTables) \
    macro(evalCodeCache) \
    macro(expressionInfo) \
    macro(lineInfo) \
    macro(callReturnIndexVector)
1412
1413template<typename T>
1414static size_t sizeInBytes(const Vector<T>& vector)
1415{
1416    return vector.capacity() * sizeof(T);
1417}
1418
// Copy constructor used to duplicate an already-parsed CodeBlock. Shared,
// parse-derived state (instructions, constants, source info, ...) is copied
// from |other|, while per-block compilation/profiling state (FTL flags,
// debugger state, OSR exit and optimization counters) is re-initialized.
// NOTE(review): initializer order must match member declaration order in the
// class definition, which is not visible in this chunk.
CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other)
    : m_globalObject(other.m_globalObject)
    , m_heap(other.m_heap)
    , m_numCalleeRegisters(other.m_numCalleeRegisters)
    , m_numVars(other.m_numVars)
    , m_isConstructor(other.m_isConstructor)
    , m_shouldAlwaysBeInlined(true) // Reset rather than copied from |other|.
    , m_didFailFTLCompilation(false) // Compilation history starts fresh.
    , m_hasBeenCompiledWithFTL(false)
    , m_unlinkedCode(*other.m_vm, other.m_ownerExecutable.get(), other.m_unlinkedCode.get())
    , m_hasDebuggerStatement(false) // Debugger state is also reset.
    , m_steppingMode(SteppingModeDisabled)
    , m_numBreakpoints(0)
    , m_ownerExecutable(*other.m_vm, other.m_ownerExecutable.get(), other.m_ownerExecutable.get())
    , m_vm(other.m_vm)
    , m_instructions(other.m_instructions)
    , m_thisRegister(other.m_thisRegister)
    , m_argumentsRegister(other.m_argumentsRegister)
    , m_activationRegister(other.m_activationRegister)
    , m_isStrictMode(other.m_isStrictMode)
    , m_needsActivation(other.m_needsActivation)
    , m_mayBeExecuting(false)
    , m_visitAggregateHasBeenCalled(false)
    , m_source(other.m_source)
    , m_sourceOffset(other.m_sourceOffset)
    , m_firstLineColumnOffset(other.m_firstLineColumnOffset)
    , m_codeType(other.m_codeType)
    , m_constantRegisters(other.m_constantRegisters)
    , m_functionDecls(other.m_functionDecls)
    , m_functionExprs(other.m_functionExprs)
    , m_osrExitCounter(0) // Optimization bookkeeping starts from zero.
    , m_optimizationDelayCounter(0)
    , m_reoptimizationRetryCounter(0)
    , m_hash(other.m_hash) // Hash is copied: same source, same hash.
#if ENABLE(JIT)
    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
#endif
{
    // GC must be deferred while this object is only partially constructed.
    ASSERT(m_heap->isDeferred());

    if (SymbolTable* symbolTable = other.symbolTable())
        m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);

    setNumParameters(other.numParameters());
    // Put both tiers' threshold counters into their initial "after warm-up"
    // state instead of inheriting |other|'s progress.
    optimizeAfterWarmUp();
    jitAfterWarmUp();

    if (other.m_rareData) {
        createRareDataIfNecessary();

        // Copy the rare-data tables from the original block.
        m_rareData->m_exceptionHandlers = other.m_rareData->m_exceptionHandlers;
        m_rareData->m_constantBuffers = other.m_rareData->m_constantBuffers;
        m_rareData->m_switchJumpTables = other.m_rareData->m_switchJumpTables;
        m_rareData->m_stringSwitchJumpTables = other.m_rareData->m_stringSwitchJumpTables;
    }

    // Register with the GC and report our memory footprint.
    m_heap->m_codeBlocks.add(this);
    m_heap->reportExtraMemoryCost(sizeof(CodeBlock));
}
1478
1479CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlinkedCodeBlock, JSScope* scope, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, unsigned firstLineColumnOffset)
1480    : m_globalObject(scope->globalObject()->vm(), ownerExecutable, scope->globalObject())
1481    , m_heap(&m_globalObject->vm().heap)
1482    , m_numCalleeRegisters(unlinkedCodeBlock->m_numCalleeRegisters)
1483    , m_numVars(unlinkedCodeBlock->m_numVars)
1484    , m_isConstructor(unlinkedCodeBlock->isConstructor())
1485    , m_shouldAlwaysBeInlined(true)
1486    , m_didFailFTLCompilation(false)
1487    , m_hasBeenCompiledWithFTL(false)
1488    , m_unlinkedCode(m_globalObject->vm(), ownerExecutable, unlinkedCodeBlock)
1489    , m_hasDebuggerStatement(false)
1490    , m_steppingMode(SteppingModeDisabled)
1491    , m_numBreakpoints(0)
1492    , m_ownerExecutable(m_globalObject->vm(), ownerExecutable, ownerExecutable)
1493    , m_vm(unlinkedCodeBlock->vm())
1494    , m_thisRegister(unlinkedCodeBlock->thisRegister())
1495    , m_argumentsRegister(unlinkedCodeBlock->argumentsRegister())
1496    , m_activationRegister(unlinkedCodeBlock->activationRegister())
1497    , m_isStrictMode(unlinkedCodeBlock->isStrictMode())
1498    , m_needsActivation(unlinkedCodeBlock->hasActivationRegister() && unlinkedCodeBlock->codeType() == FunctionCode)
1499    , m_mayBeExecuting(false)
1500    , m_visitAggregateHasBeenCalled(false)
1501    , m_source(sourceProvider)
1502    , m_sourceOffset(sourceOffset)
1503    , m_firstLineColumnOffset(firstLineColumnOffset)
1504    , m_codeType(unlinkedCodeBlock->codeType())
1505    , m_osrExitCounter(0)
1506    , m_optimizationDelayCounter(0)
1507    , m_reoptimizationRetryCounter(0)
1508#if ENABLE(JIT)
1509    , m_capabilityLevelState(DFG::CapabilityLevelNotSet)
1510#endif
1511{
1512    ASSERT(m_heap->isDeferred());
1513
1514    bool didCloneSymbolTable = false;
1515
1516    if (SymbolTable* symbolTable = unlinkedCodeBlock->symbolTable()) {
1517        if (codeType() == FunctionCode && symbolTable->captureCount()) {
1518            m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable->cloneCapturedNames(*m_vm));
1519            didCloneSymbolTable = true;
1520        } else
1521            m_symbolTable.set(*m_vm, m_ownerExecutable.get(), symbolTable);
1522    }
1523
1524    ASSERT(m_source);
1525    setNumParameters(unlinkedCodeBlock->numParameters());
1526
1527    setConstantRegisters(unlinkedCodeBlock->constantRegisters());
1528    if (unlinkedCodeBlock->usesGlobalObject())
1529        m_constantRegisters[unlinkedCodeBlock->globalObjectRegister().toConstantIndex()].set(*m_vm, ownerExecutable, m_globalObject.get());
1530    m_functionDecls.resizeToFit(unlinkedCodeBlock->numberOfFunctionDecls());
1531    for (size_t count = unlinkedCodeBlock->numberOfFunctionDecls(), i = 0; i < count; ++i) {
1532        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionDecl(i);
1533        unsigned lineCount = unlinkedExecutable->lineCount();
1534        unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1535        bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
1536        unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
1537        bool endColumnIsOnStartLine = !lineCount;
1538        unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
1539        unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1540        unsigned sourceLength = unlinkedExecutable->sourceLength();
1541        SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1542        FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
1543        m_functionDecls[i].set(*m_vm, ownerExecutable, executable);
1544    }
1545
1546    m_functionExprs.resizeToFit(unlinkedCodeBlock->numberOfFunctionExprs());
1547    for (size_t count = unlinkedCodeBlock->numberOfFunctionExprs(), i = 0; i < count; ++i) {
1548        UnlinkedFunctionExecutable* unlinkedExecutable = unlinkedCodeBlock->functionExpr(i);
1549        unsigned lineCount = unlinkedExecutable->lineCount();
1550        unsigned firstLine = ownerExecutable->lineNo() + unlinkedExecutable->firstLineOffset();
1551        bool startColumnIsOnOwnerStartLine = !unlinkedExecutable->firstLineOffset();
1552        unsigned startColumn = unlinkedExecutable->unlinkedBodyStartColumn() + (startColumnIsOnOwnerStartLine ? ownerExecutable->startColumn() : 1);
1553        bool endColumnIsOnStartLine = !lineCount;
1554        unsigned endColumn = unlinkedExecutable->unlinkedBodyEndColumn() + (endColumnIsOnStartLine ? startColumn : 1);
1555        unsigned startOffset = sourceOffset + unlinkedExecutable->startOffset();
1556        unsigned sourceLength = unlinkedExecutable->sourceLength();
1557        SourceCode code(m_source, startOffset, startOffset + sourceLength, firstLine, startColumn);
1558        FunctionExecutable* executable = FunctionExecutable::create(*m_vm, code, unlinkedExecutable, firstLine, firstLine + lineCount, startColumn, endColumn);
1559        m_functionExprs[i].set(*m_vm, ownerExecutable, executable);
1560    }
1561
1562    if (unlinkedCodeBlock->hasRareData()) {
1563        createRareDataIfNecessary();
1564        if (size_t count = unlinkedCodeBlock->constantBufferCount()) {
1565            m_rareData->m_constantBuffers.grow(count);
1566            for (size_t i = 0; i < count; i++) {
1567                const UnlinkedCodeBlock::ConstantBuffer& buffer = unlinkedCodeBlock->constantBuffer(i);
1568                m_rareData->m_constantBuffers[i] = buffer;
1569            }
1570        }
1571        if (size_t count = unlinkedCodeBlock->numberOfExceptionHandlers()) {
1572            m_rareData->m_exceptionHandlers.resizeToFit(count);
1573            size_t nonLocalScopeDepth = scope->depth();
1574            for (size_t i = 0; i < count; i++) {
1575                const UnlinkedHandlerInfo& handler = unlinkedCodeBlock->exceptionHandler(i);
1576                m_rareData->m_exceptionHandlers[i].start = handler.start;
1577                m_rareData->m_exceptionHandlers[i].end = handler.end;
1578                m_rareData->m_exceptionHandlers[i].target = handler.target;
1579                m_rareData->m_exceptionHandlers[i].scopeDepth = nonLocalScopeDepth + handler.scopeDepth;
1580#if ENABLE(JIT)
1581                m_rareData->m_exceptionHandlers[i].nativeCode = CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(op_catch)));
1582#endif
1583            }
1584        }
1585
1586        if (size_t count = unlinkedCodeBlock->numberOfStringSwitchJumpTables()) {
1587            m_rareData->m_stringSwitchJumpTables.grow(count);
1588            for (size_t i = 0; i < count; i++) {
1589                UnlinkedStringJumpTable::StringOffsetTable::iterator ptr = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.begin();
1590                UnlinkedStringJumpTable::StringOffsetTable::iterator end = unlinkedCodeBlock->stringSwitchJumpTable(i).offsetTable.end();
1591                for (; ptr != end; ++ptr) {
1592                    OffsetLocation offset;
1593                    offset.branchOffset = ptr->value;
1594                    m_rareData->m_stringSwitchJumpTables[i].offsetTable.add(ptr->key, offset);
1595                }
1596            }
1597        }
1598
1599        if (size_t count = unlinkedCodeBlock->numberOfSwitchJumpTables()) {
1600            m_rareData->m_switchJumpTables.grow(count);
1601            for (size_t i = 0; i < count; i++) {
1602                UnlinkedSimpleJumpTable& sourceTable = unlinkedCodeBlock->switchJumpTable(i);
1603                SimpleJumpTable& destTable = m_rareData->m_switchJumpTables[i];
1604                destTable.branchOffsets = sourceTable.branchOffsets;
1605                destTable.min = sourceTable.min;
1606            }
1607        }
1608    }
1609
1610    // Allocate metadata buffers for the bytecode
1611    if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())
1612        m_llintCallLinkInfos.resizeToFit(size);
1613    if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())
1614        m_arrayProfiles.grow(size);
1615    if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())
1616        m_arrayAllocationProfiles.resizeToFit(size);
1617    if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())
1618        m_valueProfiles.resizeToFit(size);
1619    if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())
1620        m_objectAllocationProfiles.resizeToFit(size);
1621
1622    // Copy and translate the UnlinkedInstructions
1623    unsigned instructionCount = unlinkedCodeBlock->instructions().count();
1624    UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());
1625
1626    Vector<Instruction, 0, UnsafeVectorOverflow> instructions(instructionCount);
1627    for (unsigned i = 0; !instructionReader.atEnd(); ) {
1628        const UnlinkedInstruction* pc = instructionReader.next();
1629
1630        unsigned opLength = opcodeLength(pc[0].u.opcode);
1631
1632        instructions[i] = vm()->interpreter->getOpcode(pc[0].u.opcode);
1633        for (size_t j = 1; j < opLength; ++j) {
1634            if (sizeof(int32_t) != sizeof(intptr_t))
1635                instructions[i + j].u.pointer = 0;
1636            instructions[i + j].u.operand = pc[j].u.operand;
1637        }
1638        switch (pc[0].u.opcode) {
1639        case op_call_varargs:
1640        case op_construct_varargs:
1641        case op_get_by_val:
1642        case op_get_argument_by_val: {
1643            int arrayProfileIndex = pc[opLength - 2].u.operand;
1644            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1645
1646            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1647            FALLTHROUGH;
1648        }
1649        case op_get_by_id: {
1650            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1651            ASSERT(profile->m_bytecodeOffset == -1);
1652            profile->m_bytecodeOffset = i;
1653            instructions[i + opLength - 1] = profile;
1654            break;
1655        }
1656        case op_put_by_val: {
1657            int arrayProfileIndex = pc[opLength - 1].u.operand;
1658            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1659            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1660            break;
1661        }
1662        case op_put_by_val_direct: {
1663            int arrayProfileIndex = pc[opLength - 1].u.operand;
1664            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1665            instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex];
1666            break;
1667        }
1668
1669        case op_new_array:
1670        case op_new_array_buffer:
1671        case op_new_array_with_size: {
1672            int arrayAllocationProfileIndex = pc[opLength - 1].u.operand;
1673            instructions[i + opLength - 1] = &m_arrayAllocationProfiles[arrayAllocationProfileIndex];
1674            break;
1675        }
1676        case op_new_object: {
1677            int objectAllocationProfileIndex = pc[opLength - 1].u.operand;
1678            ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex];
1679            int inferredInlineCapacity = pc[opLength - 2].u.operand;
1680
1681            instructions[i + opLength - 1] = objectAllocationProfile;
1682            objectAllocationProfile->initialize(*vm(),
1683                m_ownerExecutable.get(), m_globalObject->objectPrototype(), inferredInlineCapacity);
1684            break;
1685        }
1686
1687        case op_call:
1688        case op_call_eval: {
1689            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1690            ASSERT(profile->m_bytecodeOffset == -1);
1691            profile->m_bytecodeOffset = i;
1692            instructions[i + opLength - 1] = profile;
1693            int arrayProfileIndex = pc[opLength - 2].u.operand;
1694            m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i);
1695            instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex];
1696            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1697            break;
1698        }
1699        case op_construct: {
1700            instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand];
1701            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1702            ASSERT(profile->m_bytecodeOffset == -1);
1703            profile->m_bytecodeOffset = i;
1704            instructions[i + opLength - 1] = profile;
1705            break;
1706        }
1707        case op_get_by_id_out_of_line:
1708        case op_get_array_length:
1709            CRASH();
1710
1711        case op_init_global_const_nop: {
1712            ASSERT(codeType() == GlobalCode);
1713            Identifier ident = identifier(pc[4].u.operand);
1714            SymbolTableEntry entry = m_globalObject->symbolTable()->get(ident.impl());
1715            if (entry.isNull())
1716                break;
1717
1718            instructions[i + 0] = vm()->interpreter->getOpcode(op_init_global_const);
1719            instructions[i + 1] = &m_globalObject->registerAt(entry.getIndex());
1720            break;
1721        }
1722
1723        case op_resolve_scope: {
1724            const Identifier& ident = identifier(pc[2].u.operand);
1725            ResolveType type = static_cast<ResolveType>(pc[3].u.operand);
1726
1727            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, type);
1728            instructions[i + 3].u.operand = op.type;
1729            instructions[i + 4].u.operand = op.depth;
1730            if (op.activation)
1731                instructions[i + 5].u.activation.set(*vm(), ownerExecutable, op.activation);
1732            break;
1733        }
1734
1735        case op_get_from_scope: {
1736            ValueProfile* profile = &m_valueProfiles[pc[opLength - 1].u.operand];
1737            ASSERT(profile->m_bytecodeOffset == -1);
1738            profile->m_bytecodeOffset = i;
1739            instructions[i + opLength - 1] = profile;
1740
1741            // get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
1742            const Identifier& ident = identifier(pc[3].u.operand);
1743            ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
1744            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Get, modeAndType.type());
1745
1746            instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1747            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
1748                instructions[i + 5].u.watchpointSet = op.watchpointSet;
1749            else if (op.structure)
1750                instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1751            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1752            break;
1753        }
1754
1755        case op_put_to_scope: {
1756            // put_to_scope scope, id, value, ResolveModeAndType, Structure, Operand
1757            const Identifier& ident = identifier(pc[2].u.operand);
1758            ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
1759            ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), scope, ident, Put, modeAndType.type());
1760
1761            instructions[i + 4].u.operand = ResolveModeAndType(modeAndType.mode(), op.type).operand();
1762            if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks)
1763                instructions[i + 5].u.watchpointSet = op.watchpointSet;
1764            else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) {
1765                if (op.watchpointSet)
1766                    op.watchpointSet->invalidate();
1767            } else if (op.structure)
1768                instructions[i + 5].u.structure.set(*vm(), ownerExecutable, op.structure);
1769            instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);
1770            break;
1771        }
1772
1773        case op_captured_mov:
1774        case op_new_captured_func: {
1775            if (pc[3].u.index == UINT_MAX) {
1776                instructions[i + 3].u.watchpointSet = 0;
1777                break;
1778            }
1779            StringImpl* uid = identifier(pc[3].u.index).impl();
1780            RELEASE_ASSERT(didCloneSymbolTable);
1781            ConcurrentJITLocker locker(m_symbolTable->m_lock);
1782            SymbolTable::Map::iterator iter = m_symbolTable->find(locker, uid);
1783            ASSERT(iter != m_symbolTable->end(locker));
1784            iter->value.prepareToWatch(symbolTable());
1785            instructions[i + 3].u.watchpointSet = iter->value.watchpointSet();
1786            break;
1787        }
1788
1789        case op_debug: {
1790            if (pc[1].u.index == DidReachBreakpoint)
1791                m_hasDebuggerStatement = true;
1792            break;
1793        }
1794
1795        default:
1796            break;
1797        }
1798        i += opLength;
1799    }
1800    m_instructions = WTF::RefCountedArray<Instruction>(instructions);
1801
1802    // Set optimization thresholds only after m_instructions is initialized, since these
1803    // rely on the instruction count (and are in theory permitted to also inspect the
1804    // instruction stream to more accurate assess the cost of tier-up).
1805    optimizeAfterWarmUp();
1806    jitAfterWarmUp();
1807
1808    // If the concurrent thread will want the code block's hash, then compute it here
1809    // synchronously.
1810    if (Options::alwaysComputeHash())
1811        hash();
1812
1813    if (Options::dumpGeneratedBytecodes())
1814        dumpBytecode();
1815
1816    m_heap->m_codeBlocks.add(this);
1817    m_heap->reportExtraMemoryCost(sizeof(CodeBlock) + m_instructions.size() * sizeof(Instruction));
1818}
1819
CodeBlock::~CodeBlock()
{
    // Tell the per-bytecode profiler that this block is going away so it does
    // not keep a dangling CodeBlock pointer.
    if (m_vm->m_perBytecodeProfiler)
        m_vm->m_perBytecodeProfiler->notifyDestruction(this);

#if ENABLE(VERBOSE_VALUE_PROFILE)
    dumpValueProfiles();
#endif
    // Unlink any LLInt callers that still point at us, for the same reason as
    // the m_incomingCalls unlinking below.
    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
        m_incomingLLIntCalls.begin()->remove();
#if ENABLE(JIT)
    // We may be destroyed before any CodeBlocks that refer to us are destroyed.
    // Consider that two CodeBlocks become unreachable at the same time. There
    // is no guarantee about the order in which the CodeBlocks are destroyed.
    // So, if we don't remove incoming calls, and get destroyed before the
    // CodeBlock(s) that have calls into us, then the CallLinkInfo vector's
    // destructor will try to remove nodes from our (no longer valid) linked list.
    while (m_incomingCalls.begin() != m_incomingCalls.end())
        m_incomingCalls.begin()->remove();

    // Note that our outgoing calls will be removed from other CodeBlocks'
    // m_incomingCalls linked lists through the execution of the ~CallLinkInfo
    // destructors.

    // Drop this block's reference on each of its structure stub infos; they
    // are ref-counted, so this may or may not free them.
    for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter)
        (*iter)->deref();
#endif // ENABLE(JIT)
}
1848
1849void CodeBlock::setNumParameters(int newValue)
1850{
1851    m_numParameters = newValue;
1852
1853    m_argumentValueProfiles.resizeToFit(newValue);
1854}
1855
1856void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
1857{
1858    EvalCacheMap::iterator end = m_cacheMap.end();
1859    for (EvalCacheMap::iterator ptr = m_cacheMap.begin(); ptr != end; ++ptr)
1860        visitor.append(&ptr->value);
1861}
1862
1863CodeBlock* CodeBlock::specialOSREntryBlockOrNull()
1864{
1865#if ENABLE(FTL_JIT)
1866    if (jitType() != JITCode::DFGJIT)
1867        return 0;
1868    DFG::JITCode* jitCode = m_jitCode->dfg();
1869    return jitCode->osrEntryBlock.get();
1870#else // ENABLE(FTL_JIT)
1871    return 0;
1872#endif // ENABLE(FTL_JIT)
1873}
1874
// GC entry point: scans this code block's references. Depending on liveness
// policy this either scans everything strongly, or registers harvesters and
// performs one round of the weak-reference fixpoint (DFG only).
void CodeBlock::visitAggregate(SlotVisitor& visitor)
{
#if ENABLE(PARALLEL_GC)
    // I may be asked to scan myself more than once, and it may even happen concurrently.
    // To this end, use a CAS loop to check if I've been called already. Only one thread
    // may proceed past this point - whichever one wins the CAS race.
    unsigned oldValue;
    do {
        oldValue = m_visitAggregateHasBeenCalled;
        if (oldValue) {
            // Looks like someone else won! Return immediately to ensure that we don't
            // trace the same CodeBlock concurrently. Doing so is hazardous since we will
            // be mutating the state of ValueProfiles, which contain JSValues, which can
            // have word-tearing on 32-bit, leading to awesome timing-dependent crashes
            // that are nearly impossible to track down.

            // Also note that it must be safe to return early as soon as we see the
            // value true (well, (unsigned)1), since once a GC thread is in this method
            // and has won the CAS race (i.e. was responsible for setting the value true)
            // it will definitely complete the rest of this method before declaring
            // termination.
            return;
        }
    } while (!WTF::weakCompareAndSwap(&m_visitAggregateHasBeenCalled, 0, 1));
#endif // ENABLE(PARALLEL_GC)

    // Recursively visit our companion code blocks: the alternative (lower
    // tier) block, and the OSR entry block if we have one.
    if (!!m_alternative)
        m_alternative->visitAggregate(visitor);

    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
        otherBlock->visitAggregate(visitor);

    // Account our extra (non-GC-heap) memory against the owner executable.
    visitor.reportExtraMemoryUsage(ownerExecutable(), sizeof(CodeBlock));
    if (m_jitCode)
        visitor.reportExtraMemoryUsage(ownerExecutable(), m_jitCode->size());
    if (m_instructions.size()) {
        // Divide by refCount() because m_instructions points to something that is shared
        // by multiple CodeBlocks, and we only want to count it towards the heap size once.
        // Having each CodeBlock report only its proportional share of the size is one way
        // of accomplishing this.
        visitor.reportExtraMemoryUsage(ownerExecutable(), m_instructions.size() * sizeof(Instruction) / m_instructions.refCount());
    }

    visitor.append(&m_unlinkedCode);

    // There are three things that may use unconditional finalizers: lazy bytecode freeing,
    // inline cache clearing, and jettisoning. The probability of us wanting to do at
    // least one of those things is probably quite close to 1. So we add one no matter what
    // and when it runs, it figures out whether it has any work to do.
    visitor.addUnconditionalFinalizer(this);

    // Reset the transition-marking cache; propagateTransitions() will set it
    // again once it has seen all transitions marked.
    m_allTransitionsHaveBeenMarked = false;

    if (shouldImmediatelyAssumeLivenessDuringScan()) {
        // This code block is live, so scan all references strongly and return.
        stronglyVisitStrongReferences(visitor);
        stronglyVisitWeakReferences(visitor);
        propagateTransitions(visitor);
        return;
    }

    // There are two things that we use weak reference harvesters for: DFG fixpoint for
    // jettisoning, and trying to find structures that would be live based on some
    // inline cache. So it makes sense to register them regardless.
    visitor.addWeakReferenceHarvester(this);

#if ENABLE(DFG_JIT)
    // We get here if we're live in the sense that our owner executable is live,
    // but we're not yet live for sure in another sense: we may yet decide that this
    // code block should be jettisoned based on its outgoing weak references being
    // stale. Set a flag to indicate that we're still assuming that we're dead, and
    // perform one round of determining if we're live. The GC may determine, based on
    // either us marking additional objects, or by other objects being marked for
    // other reasons, that this iteration should run again; it will notify us of this
    // decision by calling harvestWeakReferences().

    m_jitCode->dfgCommon()->livenessHasBeenProved = false;

    propagateTransitions(visitor);
    determineLiveness(visitor);
#else // ENABLE(DFG_JIT)
    // Only optimizing-JIT code blocks can reach the fixpoint path; see
    // shouldImmediatelyAssumeLivenessDuringScan().
    RELEASE_ASSERT_NOT_REACHED();
#endif // ENABLE(DFG_JIT)
}
1959
1960bool CodeBlock::shouldImmediatelyAssumeLivenessDuringScan()
1961{
1962#if ENABLE(DFG_JIT)
1963    // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
1964    // their weak references go stale. So if a basline JIT CodeBlock gets
1965    // scanned, we can assume that this means that it's live.
1966    if (!JITCode::isOptimizingJIT(jitType()))
1967        return true;
1968
1969    // For simplicity, we don't attempt to jettison code blocks during GC if
1970    // they are executing. Instead we strongly mark their weak references to
1971    // allow them to continue to execute soundly.
1972    if (m_mayBeExecuting)
1973        return true;
1974
1975    if (Options::forceDFGCodeBlockLiveness())
1976        return true;
1977
1978    return false;
1979#else
1980    return true;
1981#endif
1982}
1983
1984bool CodeBlock::isKnownToBeLiveDuringGC()
1985{
1986#if ENABLE(DFG_JIT)
1987    // This should return true for:
1988    // - Code blocks that behave like normal objects - i.e. if they are referenced then they
1989    //   are live.
1990    // - Code blocks that were running on the stack.
1991    // - Code blocks that survived the last GC if the current GC is an Eden GC. This is
1992    //   because either livenessHasBeenProved would have survived as true or m_mayBeExecuting
1993    //   would survive as true.
1994    // - Code blocks that don't have any dead weak references.
1995
1996    return shouldImmediatelyAssumeLivenessDuringScan()
1997        || m_jitCode->dfgCommon()->livenessHasBeenProved;
1998#else
1999    return true;
2000#endif
2001}
2002
// Marks the destination Structure of each recorded structure transition whose
// source structure (and, where present, code origin) is already marked. Covers
// the LLInt bytecode caches, the baseline JIT's stub infos, and the DFG's
// recorded transitions. Caches completion in m_allTransitionsHaveBeenMarked so
// repeated fixpoint iterations can return early.
void CodeBlock::propagateTransitions(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

    if (m_allTransitionsHaveBeenMarked)
        return;

    bool allAreMarkedSoFar = true;
    
    Interpreter* interpreter = m_vm->interpreter;
    if (jitType() == JITCode::InterpreterThunk) {
        // LLInt: walk the put_by_id transition caches embedded in the
        // instruction stream. Slot 4 holds the source structure, slot 6 the
        // destination (see the analogous clearing code in finalizeUnconditionally).
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) {
            Instruction* instruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(instruction[0].u.opcode)) {
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line: {
                if (Heap::isMarked(instruction[4].u.structure.get()))
                    visitor.append(&instruction[6].u.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }
            default:
                break;
            }
        }
    }

#if ENABLE(JIT)
    if (JITCode::isJIT(jitType())) {
        // Baseline/optimizing JIT: walk the structure stub infos for put_by_id
        // transition caches (single-transition and polymorphic-list forms).
        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;
            switch (stubInfo.accessType) {
            case access_put_by_id_transition_normal:
            case access_put_by_id_transition_direct: {
                // A null code origin owner means the access wasn't inlined;
                // otherwise the inlinee's owner must itself be marked.
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if ((!origin || Heap::isMarked(origin))
                    && Heap::isMarked(stubInfo.u.putByIdTransition.previousStructure.get()))
                    visitor.append(&stubInfo.u.putByIdTransition.structure);
                else
                    allAreMarkedSoFar = false;
                break;
            }

            case access_put_by_id_list: {
                PolymorphicPutByIdList* list = stubInfo.u.putByIdList.list;
                JSCell* origin = stubInfo.codeOrigin.codeOriginOwner();
                if (origin && !Heap::isMarked(origin)) {
                    allAreMarkedSoFar = false;
                    break;
                }
                for (unsigned j = list->size(); j--;) {
                    PutByIdAccess& access = list->m_list[j];
                    if (!access.isTransition())
                        continue;
                    if (Heap::isMarked(access.oldStructure()))
                        visitor.append(&access.m_newStructure);
                    else
                        allAreMarkedSoFar = false;
                }
                break;
            }

            default:
                break;
            }
        }
    }
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
            if ((!dfgCommon->transitions[i].m_codeOrigin
                 || Heap::isMarked(dfgCommon->transitions[i].m_codeOrigin.get()))
                && Heap::isMarked(dfgCommon->transitions[i].m_from.get())) {
                // If the following three things are live, then the target of the
                // transition is also live:
                // - This code block. We know it's live already because otherwise
                //   we wouldn't be scanning ourselves.
                // - The code origin of the transition. Transitions may arise from
                //   code that was inlined. They are not relevant if the user's
                //   object that is required for the inlinee to run is no longer
                //   live.
                // - The source of the transition. The transition checks if some
                //   heap location holds the source, and if so, stores the target.
                //   Hence the source must be live for the transition to be live.
                visitor.append(&dfgCommon->transitions[i].m_to);
            } else
                allAreMarkedSoFar = false;
        }
    }
#endif // ENABLE(DFG_JIT)

    // Only cache completion once every transition source was marked; otherwise
    // a later fixpoint iteration may still have marking to do.
    if (allAreMarkedSoFar)
        m_allTransitionsHaveBeenMarked = true;
}
2104
2105void CodeBlock::determineLiveness(SlotVisitor& visitor)
2106{
2107    UNUSED_PARAM(visitor);
2108
2109    if (shouldImmediatelyAssumeLivenessDuringScan())
2110        return;
2111
2112#if ENABLE(DFG_JIT)
2113    // Check if we have any remaining work to do.
2114    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
2115    if (dfgCommon->livenessHasBeenProved)
2116        return;
2117
2118    // Now check all of our weak references. If all of them are live, then we
2119    // have proved liveness and so we scan our strong references. If at end of
2120    // GC we still have not proved liveness, then this code block is toast.
2121    bool allAreLiveSoFar = true;
2122    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
2123        if (!Heap::isMarked(dfgCommon->weakReferences[i].get())) {
2124            allAreLiveSoFar = false;
2125            break;
2126        }
2127    }
2128
2129    // If some weak references are dead, then this fixpoint iteration was
2130    // unsuccessful.
2131    if (!allAreLiveSoFar)
2132        return;
2133
2134    // All weak references are live. Record this information so we don't
2135    // come back here again, and scan the strong references.
2136    dfgCommon->livenessHasBeenProved = true;
2137    stronglyVisitStrongReferences(visitor);
2138#endif // ENABLE(DFG_JIT)
2139}
2140
// Weak reference harvester callback: re-runs one round of transition
// propagation followed by liveness determination, in the same order used by
// visitAggregate()'s DFG path.
void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
{
    propagateTransitions(visitor);
    determineLiveness(visitor);
}
2146
// GC finalization hook: runs after marking, and prunes weak references whose
// referents died in this cycle. Specifically:
//  - clears dead Structures/cells out of LLInt inline caches, and rewrites
//    put_by_id transition caches back to plain op_put_by_id;
//  - unlinks LLInt call links whose callees died;
//  - jettisons DFG code that is no longer known to be live;
//  - visits JIT call link infos and resets JIT stubs holding dead references.
void CodeBlock::finalizeUnconditionally()
{
    Interpreter* interpreter = m_vm->interpreter;
    if (JITCode::couldBeInterpreted(jitType())) {
        const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();
        for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) {
            Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];
            switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
            case op_get_by_id:
            case op_get_by_id_out_of_line:
            case op_put_by_id:
            case op_put_by_id_out_of_line:
                // Operand 4 caches a Structure; keep the cache only while that
                // Structure is still marked.
                if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
                curInstruction[4].u.structure.clear();
                curInstruction[5].u.operand = 0;
                break;
            case op_put_by_id_transition_direct:
            case op_put_by_id_transition_normal:
            case op_put_by_id_transition_direct_out_of_line:
            case op_put_by_id_transition_normal_out_of_line:
                // A transition cache holds the old structure (operand 4), the
                // new structure (operand 6) and a structure chain (operand 7);
                // all three must still be live for the cache to survive.
                if (Heap::isMarked(curInstruction[4].u.structure.get())
                    && Heap::isMarked(curInstruction[6].u.structure.get())
                    && Heap::isMarked(curInstruction[7].u.structureChain.get()))
                    break;
                if (Options::verboseOSR()) {
                    dataLogF("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
                            curInstruction[4].u.structure.get(),
                            curInstruction[6].u.structure.get(),
                            curInstruction[7].u.structureChain.get());
                }
                curInstruction[4].u.structure.clear();
                curInstruction[6].u.structure.clear();
                curInstruction[7].u.structureChain.clear();
                // Demote the instruction back to a plain put_by_id.
                curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
                break;
            case op_get_array_length:
                // Caches no GC cells; nothing to clear.
                break;
            case op_to_this:
                if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get());
                curInstruction[2].u.structure.clear();
                break;
            case op_get_callee:
                if (!curInstruction[2].u.jsCell || Heap::isMarked(curInstruction[2].u.jsCell.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing LLInt get callee with function %p.\n", curInstruction[2].u.jsCell.get());
                curInstruction[2].u.jsCell.clear();
                break;
            case op_resolve_scope: {
                WriteBarrierBase<JSActivation>& activation = curInstruction[5].u.activation;
                if (!activation || Heap::isMarked(activation.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing dead activation %p.\n", activation.get());
                activation.clear();
                break;
            }
            case op_get_from_scope:
            case op_put_to_scope: {
                ResolveModeAndType modeAndType =
                    ResolveModeAndType(curInstruction[4].u.operand);
                // Global-variable accesses don't cache a structure in operand 5,
                // so there is nothing to clear for them.
                if (modeAndType.type() == GlobalVar || modeAndType.type() == GlobalVarWithVarInjectionChecks)
                    continue;
                WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure;
                if (!structure || Heap::isMarked(structure.get()))
                    break;
                if (Options::verboseOSR())
                    dataLogF("Clearing scope access with structure %p.\n", structure.get());
                structure.clear();
                break;
            }
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }

        // Unlink LLInt call caches whose callee died, and forget dead
        // last-seen callees.
        for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
            if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
                if (Options::verboseOSR())
                    dataLog("Clearing LLInt call from ", *this, "\n");
                m_llintCallLinkInfos[i].unlink();
            }
            if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
                m_llintCallLinkInfos[i].lastSeenCallee.clear();
        }
    }

#if ENABLE(DFG_JIT)
    // Check if we're not live. If we are, then jettison.
    if (!isKnownToBeLiveDuringGC()) {
        if (Options::verboseOSR())
            dataLog(*this, " has dead weak references, jettisoning during GC.\n");

        if (DFG::shouldShowDisassembly()) {
            dataLog(*this, " will be jettisoned because of the following dead references:\n");
            DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
            for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
                DFG::WeakReferenceTransition& transition = dfgCommon->transitions[i];
                JSCell* origin = transition.m_codeOrigin.get();
                JSCell* from = transition.m_from.get();
                JSCell* to = transition.m_to.get();
                if ((!origin || Heap::isMarked(origin)) && Heap::isMarked(from))
                    continue;
                dataLog("    Transition under ", RawPointer(origin), ", ", RawPointer(from), " -> ", RawPointer(to), ".\n");
            }
            for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i) {
                JSCell* weak = dfgCommon->weakReferences[i].get();
                if (Heap::isMarked(weak))
                    continue;
                dataLog("    Weak reference ", RawPointer(weak), ".\n");
            }
        }

        jettison(Profiler::JettisonDueToWeakReference);
        return;
    }
#endif // ENABLE(DFG_JIT)

#if ENABLE(JIT)
    // Handle inline caches.
    if (!!jitCode()) {
        RepatchBuffer repatchBuffer(this);

        for (auto iter = callLinkInfosBegin(); !!iter; ++iter)
            (*iter)->visitWeak(repatchBuffer);

        for (Bag<StructureStubInfo>::iterator iter = m_stubInfos.begin(); !!iter; ++iter) {
            StructureStubInfo& stubInfo = **iter;

            // If the stub's weak references are all still live, keep it;
            // otherwise reset it and record that the reset was GC-driven.
            if (stubInfo.visitWeakReferences(repatchBuffer))
                continue;

            resetStubDuringGCInternal(repatchBuffer, stubInfo);
        }
    }
#endif
}
2290
// Populates |result| with a map from code origin to StructureStubInfo for this
// block's JIT stubs. The locker witnesses that the caller holds m_lock.
// No-op (beyond clearing nothing) when the JIT is disabled.
void CodeBlock::getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_stubInfos, getStructureStubInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}
2299
// Convenience overload that acquires the concurrent JIT lock itself before
// delegating to the locked variant.
void CodeBlock::getStubInfoMap(StubInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getStubInfoMap(locker, result);
}
2305
// Populates |result| with a map from code origin to CallLinkInfo. The locker
// witnesses that the caller holds m_lock. No-op when the JIT is disabled.
void CodeBlock::getCallLinkInfoMap(const ConcurrentJITLocker&, CallLinkInfoMap& result)
{
#if ENABLE(JIT)
    toHashMap(m_callLinkInfos, getCallLinkInfoCodeOrigin, result);
#else
    UNUSED_PARAM(result);
#endif
}
2314
// Convenience overload that acquires the concurrent JIT lock itself before
// delegating to the locked variant.
void CodeBlock::getCallLinkInfoMap(CallLinkInfoMap& result)
{
    ConcurrentJITLocker locker(m_lock);
    getCallLinkInfoMap(locker, result);
}
2320
2321#if ENABLE(JIT)
// Allocates a fresh StructureStubInfo in this block's bag, under the
// concurrent JIT lock.
StructureStubInfo* CodeBlock::addStubInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_stubInfos.add();
}
2327
// Allocates a fresh CallLinkInfo in this block's bag, under the concurrent JIT
// lock.
CallLinkInfo* CodeBlock::addCallLinkInfo()
{
    ConcurrentJITLocker locker(m_lock);
    return m_callLinkInfos.add();
}
2333
// Resets a single inline cache stub back to its unlinked state. A stub that
// was never set up (access_unset) is left alone.
void CodeBlock::resetStub(StructureStubInfo& stubInfo)
{
    if (stubInfo.accessType == access_unset)
        return;

    ConcurrentJITLocker locker(m_lock);

    RepatchBuffer repatchBuffer(this);
    resetStubInternal(repatchBuffer, stubInfo);
}
2344
// Shared implementation for resetting a JIT inline cache: dispatches to the
// access-kind-specific resetter (get_by_id / put_by_id / in) and then clears
// the stub's own bookkeeping. Only valid on JIT code.
void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    AccessType accessType = static_cast<AccessType>(stubInfo.accessType);

    if (Options::verboseOSR()) {
        // This can be called from GC destructor calls, so we don't try to do a full dump
        // of the CodeBlock.
        dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", RawPointer(this), ".\n");
    }

    RELEASE_ASSERT(JITCode::isJIT(jitType()));

    // Each resetter knows how to repatch its specific inline cache shape.
    if (isGetByIdAccess(accessType))
        resetGetByID(repatchBuffer, stubInfo);
    else if (isPutByIdAccess(accessType))
        resetPutByID(repatchBuffer, stubInfo);
    else {
        RELEASE_ASSERT(isInAccess(accessType));
        resetIn(repatchBuffer, stubInfo);
    }

    stubInfo.reset();
}
2368
// GC-time variant of resetStubInternal: additionally flags the stub as having
// been reset by the GC.
void CodeBlock::resetStubDuringGCInternal(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
    resetStubInternal(repatchBuffer, stubInfo);
    stubInfo.resetByGC = true;
}
2374
2375CallLinkInfo* CodeBlock::getCallLinkInfoForBytecodeIndex(unsigned index)
2376{
2377    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
2378        if ((*iter)->codeOrigin == CodeOrigin(index))
2379            return *iter;
2380    }
2381    return nullptr;
2382}
2383#endif
2384
// Reports all of this block's unconditionally-strong GC references: the global
// object, owner executable, symbol table, unlinked code, the eval code cache,
// constant registers, function expressions/declarations, object allocation
// profiles, and (for optimizing JIT code) the inline call frames. Finishes by
// refreshing value profile predictions.
void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
{
    visitor.append(&m_globalObject);
    visitor.append(&m_ownerExecutable);
    visitor.append(&m_symbolTable);
    visitor.append(&m_unlinkedCode);
    if (m_rareData)
        m_rareData->m_evalCodeCache.visitAggregate(visitor);
    visitor.appendValues(m_constantRegisters.data(), m_constantRegisters.size());
    for (size_t i = 0; i < m_functionExprs.size(); ++i)
        visitor.append(&m_functionExprs[i]);
    for (size_t i = 0; i < m_functionDecls.size(); ++i)
        visitor.append(&m_functionDecls[i]);
    for (unsigned i = 0; i < m_objectAllocationProfiles.size(); ++i)
        m_objectAllocationProfiles[i].visitAggregate(visitor);

#if ENABLE(DFG_JIT)
    if (JITCode::isOptimizingJIT(jitType())) {
        DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();
        if (dfgCommon->inlineCallFrames.get())
            dfgCommon->inlineCallFrames->visitAggregate(visitor);
    }
#endif

    updateAllPredictions();
}
2411
// Visits, as if they were strong, the references the DFG normally tracks
// weakly: the structures in each weak-reference transition and the plain weak
// reference list. Only meaningful for optimizing JIT code; otherwise a no-op.
void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
{
    UNUSED_PARAM(visitor);

#if ENABLE(DFG_JIT)
    if (!JITCode::isOptimizingJIT(jitType()))
        return;

    DFG::CommonData* dfgCommon = m_jitCode->dfgCommon();

    for (unsigned i = 0; i < dfgCommon->transitions.size(); ++i) {
        if (!!dfgCommon->transitions[i].m_codeOrigin)
            visitor.append(&dfgCommon->transitions[i].m_codeOrigin); // Almost certainly not necessary, since the code origin should also be a weak reference. Better to be safe, though.
        visitor.append(&dfgCommon->transitions[i].m_from);
        visitor.append(&dfgCommon->transitions[i].m_to);
    }

    for (unsigned i = 0; i < dfgCommon->weakReferences.size(); ++i)
        visitor.append(&dfgCommon->weakReferences[i]);
#endif
}
2433
2434CodeBlock* CodeBlock::baselineAlternative()
2435{
2436#if ENABLE(JIT)
2437    CodeBlock* result = this;
2438    while (result->alternative())
2439        result = result->alternative();
2440    RELEASE_ASSERT(result);
2441    RELEASE_ASSERT(JITCode::isBaselineCode(result->jitType()) || result->jitType() == JITCode::None);
2442    return result;
2443#else
2444    return this;
2445#endif
2446}
2447
// Returns the baseline CodeBlock for this code: this block itself when it is
// already baseline, otherwise the bottom of the replacement's alternative()
// chain. If no replacement exists yet, we must be the original (uncompiled)
// CodeBlock for the executable, so we answer with ourselves.
CodeBlock* CodeBlock::baselineVersion()
{
#if ENABLE(JIT)
    if (JITCode::isBaselineCode(jitType()))
        return this;
    CodeBlock* result = replacement();
    if (!result) {
        // This can happen if we're creating the original CodeBlock for an executable.
        // Assume that we're the baseline CodeBlock.
        RELEASE_ASSERT(jitType() == JITCode::None);
        return this;
    }
    result = result->baselineAlternative();
    return result;
#else
    return this;
#endif
}
2466
2467#if ENABLE(JIT)
// Returns true if the currently installed replacement CodeBlock is of a
// strictly higher JIT tier than the given tier. Assumes replacement() is
// non-null.
bool CodeBlock::hasOptimizedReplacement(JITCode::JITType typeToReplace)
{
    return JITCode::isHigherTier(replacement()->jitType(), typeToReplace);
}
2472
// Returns true if a replacement exists at a higher tier than this block's own.
bool CodeBlock::hasOptimizedReplacement()
{
    return hasOptimizedReplacement(jitType());
}
2477#endif
2478
// Returns whether the given operand is captured — i.e. whether stores to it
// can be observed indirectly (via an activation, the arguments object, or the
// symbol table's captured range). For inlined code, the inline call frame's
// own captured-variable set is authoritative for locals.
bool CodeBlock::isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame) const
{
    // Argument 0 (the callee/this slot) is never treated as captured; other
    // arguments are captured exactly when the block uses an arguments object.
    if (operand.isArgument())
        return operand.toArgument() && usesArguments();

    if (inlineCallFrame)
        return inlineCallFrame->capturedVars.get(operand.toLocal());

    // The activation object isn't in the captured region, but it's "captured"
    // in the sense that stores to its location can be observed indirectly.
    if (needsActivation() && operand == activationRegister())
        return true;

    // Ditto for the arguments object.
    if (usesArguments() && operand == argumentsRegister())
        return true;
    if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
        return true;

    // We're in global code so there are no locals to capture
    if (!symbolTable())
        return false;

    return symbolTable()->isCaptured(operand.offset());
}
2504
2505int CodeBlock::framePointerOffsetToGetActivationRegisters(int machineCaptureStart)
2506{
2507    // We'll be adding this to the stack pointer to get a registers pointer that looks
2508    // like it would have looked in the baseline engine. For example, if bytecode would
2509    // have put the first captured variable at offset -5 but we put it at offset -1, then
2510    // we'll have an offset of 4.
2511    int32_t offset = 0;
2512
2513    // Compute where we put the captured variables. This offset will point the registers
2514    // pointer directly at the first captured var.
2515    offset += machineCaptureStart;
2516
2517    // Now compute the offset needed to make the runtime see the captured variables at the
2518    // same offset that the bytecode would have used.
2519    offset -= symbolTable()->captureStart();
2520
2521    return offset;
2522}
2523
// Zero-argument convenience: for optimizing-JIT code, computes the offset from
// the machine capture start recorded in the DFG common data; baseline/LLInt
// code needs no adjustment.
int CodeBlock::framePointerOffsetToGetActivationRegisters()
{
    if (!JITCode::isOptimizingJIT(jitType()))
        return 0;
#if ENABLE(DFG_JIT)
    return framePointerOffsetToGetActivationRegisters(jitCode()->dfgCommon()->machineCaptureStart);
#else
    RELEASE_ASSERT_NOT_REACHED();
    return 0;
#endif
}
2535
2536HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
2537{
2538    RELEASE_ASSERT(bytecodeOffset < instructions().size());
2539
2540    if (!m_rareData)
2541        return 0;
2542
2543    Vector<HandlerInfo>& exceptionHandlers = m_rareData->m_exceptionHandlers;
2544    for (size_t i = 0; i < exceptionHandlers.size(); ++i) {
2545        // Handlers are ordered innermost first, so the first handler we encounter
2546        // that contains the source address is the correct handler to use.
2547        if (exceptionHandlers[i].start <= bytecodeOffset && exceptionHandlers[i].end > bytecodeOffset)
2548            return &exceptionHandlers[i];
2549    }
2550
2551    return 0;
2552}
2553
// Maps a bytecode offset to an absolute source line: the executable's starting
// line plus the unlinked code's relative line for that offset.
unsigned CodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset)
{
    RELEASE_ASSERT(bytecodeOffset < instructions().size());
    return m_ownerExecutable->lineNo() + m_unlinkedCode->lineNumberForBytecodeOffset(bytecodeOffset);
}
2559
// Returns just the column component of the expression range for the given
// bytecode offset; the remaining out-parameters of
// expressionRangeForBytecodeOffset are computed and discarded.
unsigned CodeBlock::columnNumberForBytecodeOffset(unsigned bytecodeOffset)
{
    int divot;
    int startOffset;
    int endOffset;
    unsigned line;
    unsigned column;
    expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
    return column;
}
2570
// Translates the unlinked code's expression range for the given bytecode
// offset into absolute source coordinates: the divot is shifted by this
// block's source offset, the line by the executable's starting line. The
// column is rebased by firstLineColumnOffset() only when the relative line is
// 0 (i.e. the expression sits on the executable's first line); otherwise it
// gets a fixed adjustment of 1.
void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column)
{
    m_unlinkedCode->expressionRangeForBytecodeOffset(bytecodeOffset, divot, startOffset, endOffset, line, column);
    divot += m_sourceOffset;
    column += line ? 1 : firstLineColumnOffset();
    line += m_ownerExecutable->lineNo();
}
2578
2579bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column)
2580{
2581    Interpreter* interpreter = vm()->interpreter;
2582    const Instruction* begin = instructions().begin();
2583    const Instruction* end = instructions().end();
2584    for (const Instruction* it = begin; it != end;) {
2585        OpcodeID opcodeID = interpreter->getOpcodeID(it->u.opcode);
2586        if (opcodeID == op_debug) {
2587            unsigned bytecodeOffset = it - begin;
2588            int unused;
2589            unsigned opDebugLine;
2590            unsigned opDebugColumn;
2591            expressionRangeForBytecodeOffset(bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);
2592            if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn))
2593                return true;
2594        }
2595        it += opcodeLengths[opcodeID];
2596    }
2597    return false;
2598}
2599
// Releases excess vector capacity. Constant registers and the switch jump
// tables are shrunk only in EarlyShrink mode; see the trailing comment for why
// they must be left alone later.
void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
{
    m_rareCaseProfiles.shrinkToFit();
    m_specialFastCaseProfiles.shrinkToFit();

    if (shrinkMode == EarlyShrink) {
        m_constantRegisters.shrinkToFit();

        if (m_rareData) {
            m_rareData->m_switchJumpTables.shrinkToFit();
            m_rareData->m_stringSwitchJumpTables.shrinkToFit();
        }
    } // else don't shrink these, because we would have already pointed pointers into these tables.
}
2614
2615unsigned CodeBlock::addOrFindConstant(JSValue v)
2616{
2617    unsigned result;
2618    if (findConstant(v, result))
2619        return result;
2620    return addConstant(v);
2621}
2622
2623bool CodeBlock::findConstant(JSValue v, unsigned& index)
2624{
2625    unsigned numberOfConstants = numberOfConstantRegisters();
2626    for (unsigned i = 0; i < numberOfConstants; ++i) {
2627        if (getConstant(FirstConstantRegisterIndex + i) == v) {
2628            index = i;
2629            return true;
2630        }
2631    }
2632    index = numberOfConstants;
2633    return false;
2634}
2635
2636#if ENABLE(JIT)
// Unlinks every outgoing call from this block (and, recursively, from its
// alternative): first the LLInt call link infos, then — if the JIT is usable —
// the JIT call link infos, which require a RepatchBuffer to rewrite code.
void CodeBlock::unlinkCalls()
{
    if (!!m_alternative)
        m_alternative->unlinkCalls();
    for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
        if (m_llintCallLinkInfos[i].isLinked())
            m_llintCallLinkInfos[i].unlink();
    }
    if (m_callLinkInfos.isEmpty())
        return;
    if (!m_vm->canUseJIT())
        return;
    RepatchBuffer repatchBuffer(this);
    for (auto iter = m_callLinkInfos.begin(); !!iter; ++iter) {
        CallLinkInfo& info = **iter;
        if (!info.isLinked())
            continue;
        info.unlink(repatchBuffer);
    }
}
2657
// Records that the caller now has a JIT call linked to this block, after first
// updating the should-always-be-inlined hypothesis in noticeIncomingCall.
void CodeBlock::linkIncomingCall(ExecState* callerFrame, CallLinkInfo* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingCalls.push(incoming);
}
2663#endif // ENABLE(JIT)
2664
// Unlinks every call currently pointing at this block. Each unlink() removes
// the entry from its list, so these loops consume the lists until empty.
void CodeBlock::unlinkIncomingCalls()
{
    while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
        m_incomingLLIntCalls.begin()->unlink();
#if ENABLE(JIT)
    if (m_incomingCalls.isEmpty())
        return;
    RepatchBuffer repatchBuffer(this);
    while (m_incomingCalls.begin() != m_incomingCalls.end())
        m_incomingCalls.begin()->unlink(repatchBuffer);
#endif // ENABLE(JIT)
}
2677
// Records that the caller now has an LLInt call linked to this block, after
// first updating the should-always-be-inlined hypothesis.
void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* incoming)
{
    noticeIncomingCall(callerFrame);
    m_incomingLLIntCalls.push(incoming);
}
2683
2684void CodeBlock::clearEvalCache()
2685{
2686    if (!!m_alternative)
2687        m_alternative->clearEvalCache();
2688    if (CodeBlock* otherBlock = specialOSREntryBlockOrNull())
2689        otherBlock->clearEvalCache();
2690    if (!m_rareData)
2691        return;
2692    m_rareData->m_evalCodeCache.clear();
2693}
2694
// Makes this CodeBlock the installed code for its owner executable.
void CodeBlock::install()
{
    ownerExecutable()->installCode(this);
}
2699
// Asks the owner executable for a fresh CodeBlock of the same specialization
// kind (call vs construct).
PassRefPtr<CodeBlock> CodeBlock::newReplacement()
{
    return ownerExecutable()->newReplacementCodeBlockFor(specializationKind());
}
2704
// Returns the slow-arguments descriptor matching the machine code's layout:
// baseline/LLInt code uses the symbol table's, while optimizing-JIT code
// records its own in the DFG common data.
const SlowArgument* CodeBlock::machineSlowArguments()
{
    if (!JITCode::isOptimizingJIT(jitType()))
        return symbolTable()->slowArguments();

#if ENABLE(DFG_JIT)
    return jitCode()->dfgCommon()->slowArguments.get();
#else // ENABLE(DFG_JIT)
    return 0;
#endif // ENABLE(DFG_JIT)
}
2716
2717#if ENABLE(JIT)
// The CodeBlock currently installed on this program executable.
CodeBlock* ProgramCodeBlock::replacement()
{
    return jsCast<ProgramExecutable*>(ownerExecutable())->codeBlock();
}
2722
// The CodeBlock currently installed on this eval executable.
CodeBlock* EvalCodeBlock::replacement()
{
    return jsCast<EvalExecutable*>(ownerExecutable())->codeBlock();
}
2727
// The CodeBlock currently installed on this function executable, for the same
// specialization (construct vs call) as this block.
CodeBlock* FunctionCodeBlock::replacement()
{
    return jsCast<FunctionExecutable*>(ownerExecutable())->codeBlockFor(m_isConstructor ? CodeForConstruct : CodeForCall);
}
2732
// DFG compilability for program code is decided by the program-level predicate.
DFG::CapabilityLevel ProgramCodeBlock::capabilityLevelInternal()
{
    return DFG::programCapabilityLevel(this);
}
2737
// DFG compilability for eval code is decided by the eval-level predicate.
DFG::CapabilityLevel EvalCodeBlock::capabilityLevelInternal()
{
    return DFG::evalCapabilityLevel(this);
}
2742
2743DFG::CapabilityLevel FunctionCodeBlock::capabilityLevelInternal()
2744{
2745    if (m_isConstructor)
2746        return DFG::functionForConstructCapabilityLevel(this);
2747    return DFG::functionForCallCapabilityLevel(this);
2748}
2749#endif
2750
// Permanently abandons this (optimizing-JIT) CodeBlock: invalidates its
// machine code so frames on the stack OSR-exit on return, optionally counts a
// reoptimization against the baseline alternative, and — if this block was the
// executable's installed entry point — reinstalls the baseline version.
void CodeBlock::jettison(Profiler::JettisonReason reason, ReoptimizationMode mode)
{
    RELEASE_ASSERT(reason != Profiler::NotJettisoned);

#if ENABLE(DFG_JIT)
    if (DFG::shouldShowDisassembly()) {
        dataLog("Jettisoning ", *this);
        if (mode == CountReoptimization)
            dataLog(" and counting reoptimization");
        dataLog(" due to ", reason, ".\n");
    }

    // Defer GC: jettisoning mutates code and bookkeeping that a GC must not
    // observe mid-flight.
    DeferGCForAWhile deferGC(*m_heap);
    RELEASE_ASSERT(JITCode::isOptimizingJIT(jitType()));

    if (Profiler::Compilation* compilation = jitCode()->dfgCommon()->compilation.get())
        compilation->setJettisonReason(reason);

    // We want to accomplish two things here:
    // 1) Make sure that if this CodeBlock is on the stack right now, then if we return to it
    //    we should OSR exit at the top of the next bytecode instruction after the return.
    // 2) Make sure that if we call the owner executable, then we shouldn't call this CodeBlock.

    // This accomplishes the OSR-exit-on-return part, and does its own book-keeping about
    // whether the invalidation has already happened.
    if (!jitCode()->dfgCommon()->invalidate()) {
        // Nothing to do since we've already been invalidated. That means that we cannot be
        // the optimized replacement.
        RELEASE_ASSERT(this != replacement());
        return;
    }

    if (DFG::shouldShowDisassembly())
        dataLog("    Did invalidate ", *this, "\n");

    // Count the reoptimization if that's what the user wanted.
    if (mode == CountReoptimization) {
        // FIXME: Maybe this should call alternative().
        // https://bugs.webkit.org/show_bug.cgi?id=123677
        baselineAlternative()->countReoptimization();
        if (DFG::shouldShowDisassembly())
            dataLog("    Did count reoptimization for ", *this, "\n");
    }

    // Now take care of the entrypoint.
    if (this != replacement()) {
        // This means that we were never the entrypoint. This can happen for OSR entry code
        // blocks.
        return;
    }
    alternative()->optimizeAfterWarmUp();
    tallyFrequentExitSites();
    alternative()->install();
    if (DFG::shouldShowDisassembly())
        dataLog("    Did install baseline version of ", *this, "\n");
#else // ENABLE(DFG_JIT)
    UNUSED_PARAM(mode);
    UNREACHABLE_FOR_PLATFORM();
#endif // ENABLE(DFG_JIT)
}
2811
// Returns the global object for the given code origin: for inlined frames this
// is the global object of the inlined function's executable, which need not be
// this block's own.
JSGlobalObject* CodeBlock::globalObjectFor(CodeOrigin codeOrigin)
{
    if (!codeOrigin.inlineCallFrame)
        return globalObject();
    return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->eitherCodeBlock()->globalObject();
}
2818
// Called whenever a caller links a call to this block. Maintains the
// "should always be inlined" (SABI) hypothesis: if the caller turns out to be
// a poor inlining host — too large, still in the LLInt, not function code,
// recursive into us, or not a DFG candidate — clear m_shouldAlwaysBeInlined so
// this block gets compiled on its own instead of waiting to be inlined.
void CodeBlock::noticeIncomingCall(ExecState* callerFrame)
{
    CodeBlock* callerCodeBlock = callerFrame->codeBlock();

    if (Options::verboseCallLink())
        dataLog("Noticing call link from ", *callerCodeBlock, " to ", *this, "\n");

    // Once SABI is cleared, there is nothing left to decide.
    if (!m_shouldAlwaysBeInlined)
        return;

#if ENABLE(DFG_JIT)
    if (!hasBaselineJITProfiling())
        return;

    if (!DFG::mightInlineFunction(this))
        return;

    if (!canInline(m_capabilityLevelState))
        return;

    if (!DFG::isSmallEnoughToInlineCodeInto(callerCodeBlock)) {
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is too large.\n");
        return;
    }

    if (callerCodeBlock->jitType() == JITCode::InterpreterThunk) {
        // If the caller is still in the interpreter, then we can't expect inlining to
        // happen anytime soon. Assume it's profitable to optimize it separately. This
        // ensures that a function is SABI only if it is called no more frequently than
        // any of its callers.
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is in LLInt.\n");
        return;
    }

    if (callerCodeBlock->codeType() != FunctionCode) {
        // If the caller is either eval or global code, assume that that won't be
        // optimized anytime soon. For eval code this is particularly true since we
        // delay eval optimization by a *lot*.
        m_shouldAlwaysBeInlined = false;
        if (Options::verboseCallLink())
            dataLog("    Clearing SABI because caller is not a function.\n");
        return;
    }

    // Walk up to maximumInliningDepth caller frames looking for ourselves; a
    // hit means the call is recursive at an inlinable depth.
    ExecState* frame = callerFrame;
    for (unsigned i = Options::maximumInliningDepth(); i--; frame = frame->callerFrame()) {
        if (frame->isVMEntrySentinel())
            break;
        if (frame->codeBlock() == this) {
            // Recursive calls won't be inlined.
            if (Options::verboseCallLink())
                dataLog("    Clearing SABI because recursion was detected.\n");
            m_shouldAlwaysBeInlined = false;
            return;
        }
    }

    RELEASE_ASSERT(callerCodeBlock->m_capabilityLevelState != DFG::CapabilityLevelNotSet);

    if (canCompile(callerCodeBlock->m_capabilityLevelState))
        return;

    if (Options::verboseCallLink())
        dataLog("    Clearing SABI because the caller is not a DFG candidate.\n");

    m_shouldAlwaysBeInlined = false;
#endif
}
2891
// Number of times this block has been reoptimized, capped at the configured
// maximum; always 0 when the JIT is disabled.
unsigned CodeBlock::reoptimizationRetryCounter() const
{
#if ENABLE(JIT)
    ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax());
    return m_reoptimizationRetryCounter;
#else
    return 0;
#endif // ENABLE(JIT)
}
2901
2902#if ENABLE(JIT)
// Bumps the reoptimization retry counter, saturating at the configured
// maximum.
void CodeBlock::countReoptimization()
{
    m_reoptimizationRetryCounter++;
    if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax())
        m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax();
}
2909
// Estimates how many optimized compiles this baseline block has had: one if an
// optimizing replacement exists, plus one per reoptimization retry. Under
// Options::testTheFTL(), FTL compile status is used instead, with a huge value
// signalling a failed FTL compile.
unsigned CodeBlock::numberOfDFGCompiles()
{
    ASSERT(JITCode::isBaselineCode(jitType()));
    if (Options::testTheFTL()) {
        if (m_didFailFTLCompilation)
            return 1000000;
        return (m_hasBeenCompiledWithFTL ? 1 : 0) + m_reoptimizationRetryCounter;
    }
    return (JITCode::isOptimizingJIT(replacement()->jitType()) ? 1 : 0) + m_reoptimizationRetryCounter;
}
2920
2921int32_t CodeBlock::codeTypeThresholdMultiplier() const
2922{
2923    if (codeType() == EvalCode)
2924        return Options::evalThresholdMultiplier();
2925
2926    return 1;
2927}
2928
// Returns the factor by which this block's optimization thresholds are scaled,
// growing with instruction count so larger blocks tier up later. The fitted
// curve and its derivation are documented below.
double CodeBlock::optimizationThresholdScalingFactor()
{
    // This expression arises from doing a least-squares fit of
    //
    // F[x_] =: a * Sqrt[x + b] + Abs[c * x] + d
    //
    // against the data points:
    //
    //    x       F[x_]
    //    10       0.9          (smallest reasonable code block)
    //   200       1.0          (typical small-ish code block)
    //   320       1.2          (something I saw in 3d-cube that I wanted to optimize)
    //  1268       5.0          (something I saw in 3d-cube that I didn't want to optimize)
    //  4000       5.5          (random large size, used to cause the function to converge to a shallow curve of some sort)
    // 10000       6.0          (similar to above)
    //
    // I achieve the minimization using the following Mathematica code:
    //
    // MyFunctionTemplate[x_, a_, b_, c_, d_] := a*Sqrt[x + b] + Abs[c*x] + d
    //
    // samples = {{10, 0.9}, {200, 1}, {320, 1.2}, {1268, 5}, {4000, 5.5}, {10000, 6}}
    //
    // solution =
    //     Minimize[Plus @@ ((MyFunctionTemplate[#[[1]], a, b, c, d] - #[[2]])^2 & /@ samples),
    //         {a, b, c, d}][[2]]
    //
    // And the code below (to initialize a, b, c, d) is generated by:
    //
    // Print["const double " <> ToString[#[[1]]] <> " = " <>
    //     If[#[[2]] < 0.00001, "0.0", ToString[#[[2]]]] <> ";"] & /@ solution
    //
    // We've long known the following to be true:
    // - Small code blocks are cheap to optimize and so we should do it sooner rather
    //   than later.
    // - Large code blocks are expensive to optimize and so we should postpone doing so,
    //   and sometimes have a large enough threshold that we never optimize them.
    // - The difference in cost is not totally linear because (a) just invoking the
    //   DFG incurs some base cost and (b) for large code blocks there is enough slop
    //   in the correlation between instruction count and the actual compilation cost
    //   that for those large blocks, the instruction count should not have a strong
    //   influence on our threshold.
    //
    // I knew the goals but I didn't know how to achieve them; so I picked an interesting
    // example where the heuristics were right (code block in 3d-cube with instruction
    // count 320, which got compiled early as it should have been) and one where they were
    // totally wrong (code block in 3d-cube with instruction count 1268, which was expensive
    // to compile and didn't run often enough to warrant compilation in my opinion), and
    // then threw in additional data points that represented my own guess of what our
    // heuristics should do for some round-numbered examples.
    //
    // The expression to which I decided to fit the data arose because I started with an
    // affine function, and then did two things: put the linear part in an Abs to ensure
    // that the fit didn't end up choosing a negative value of c (which would result in
    // the function turning over and going negative for large x) and I threw in a Sqrt
    // term because Sqrt represents my intution that the function should be more sensitive
    // to small changes in small values of x, but less sensitive when x gets large.

    // Note that the current fit essentially eliminates the linear portion of the
    // expression (c == 0.0).
    const double a = 0.061504;
    const double b = 1.02406;
    const double c = 0.0;
    const double d = 0.825914;

    double instructionCount = this->instructionCount();

    ASSERT(instructionCount); // Make sure this is called only after we have an instruction stream; otherwise it'll just return the value of d, which makes no sense.

    double result = d + a * sqrt(instructionCount + b) + c * instructionCount;

    // Eval code is additionally penalized via the code-type multiplier.
    result *= codeTypeThresholdMultiplier();

    if (Options::verboseOSR()) {
        dataLog(
            *this, ": instruction count is ", instructionCount,
            ", scaling execution counter by ", result, " * ", codeTypeThresholdMultiplier(),
            "\n");
    }
    return result;
}
3009
3010static int32_t clipThreshold(double threshold)
3011{
3012    if (threshold < 1.0)
3013        return 1;
3014
3015    if (threshold > static_cast<double>(std::numeric_limits<int32_t>::max()))
3016        return std::numeric_limits<int32_t>::max();
3017
3018    return static_cast<int32_t>(threshold);
3019}
3020
3021int32_t CodeBlock::adjustedCounterValue(int32_t desiredThreshold)
3022{
3023    return clipThreshold(
3024        static_cast<double>(desiredThreshold) *
3025        optimizationThresholdScalingFactor() *
3026        (1 << reoptimizationRetryCounter()));
3027}
3028
bool CodeBlock::checkIfOptimizationThresholdReached()
{
#if ENABLE(DFG_JIT)
    // If a concurrent DFG compilation of this block has already finished,
    // arrange to install the optimized code on the very next invocation
    // instead of waiting for the execution counter to cross its threshold.
    if (DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull()) {
        if (worklist->compilationState(DFG::CompilationKey(this, DFG::DFGMode))
            == DFG::Worklist::Compiled) {
            optimizeNextInvocation();
            return true;
        }
    }
#endif

    // Otherwise defer to the execution counter's own threshold check.
    return m_jitExecuteCounter.checkIfThresholdCrossedAndSet(this);
}
3043
void CodeBlock::optimizeNextInvocation()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing next invocation.\n");
    // A threshold of zero trips the counter immediately, so the next entry
    // into this code block takes the optimization slow path.
    m_jitExecuteCounter.setNewThreshold(0, this);
}
3050
void CodeBlock::dontOptimizeAnytimeSoon()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Not optimizing anytime soon.\n");
    // Push the counter's threshold out indefinitely; used e.g. after a
    // CompilationFailed result (see setOptimizationThresholdBasedOnCompilationResult).
    m_jitExecuteCounter.deferIndefinitely();
}
3057
void CodeBlock::optimizeAfterWarmUp()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing after warm-up.\n");
#if ENABLE(DFG_JIT)
    // Re-arm the execution counter with the standard warm-up threshold,
    // adjusted for this block's scaling factor and reoptimization history.
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeAfterWarmUp()), this);
#endif
}
3067
void CodeBlock::optimizeAfterLongWarmUp()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing after long warm-up.\n");
#if ENABLE(DFG_JIT)
    // Like optimizeAfterWarmUp(), but with the larger "long warm-up"
    // threshold option.
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeAfterLongWarmUp()), this);
#endif
}
3077
void CodeBlock::optimizeSoon()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Optimizing soon.\n");
#if ENABLE(DFG_JIT)
    // Re-arm the execution counter with the small "soon" threshold option,
    // adjusted for this block's scaling factor and reoptimization history.
    m_jitExecuteCounter.setNewThreshold(
        adjustedCounterValue(Options::thresholdForOptimizeSoon()), this);
#endif
}
3087
void CodeBlock::forceOptimizationSlowPathConcurrently()
{
    if (Options::verboseOSR())
        dataLog(*this, ": Forcing slow path concurrently.\n");
    // Best-effort request that the mutator take the optimization slow path
    // soon. This is inherently racy and guarantees nothing by itself (see the
    // CompilationDeferred comment in setOptimizationThresholdBasedOnCompilationResult).
    m_jitExecuteCounter.forceSlowPathConcurrently();
}
3094
3095#if ENABLE(DFG_JIT)
// Called on the baseline code block when an optimizing compilation attempt
// settles, to decide when (or whether) optimized code should be tried again.
void CodeBlock::setOptimizationThresholdBasedOnCompilationResult(CompilationResult result)
{
    // This must only ever run on the baseline block.
    JITCode::JITType type = jitType();
    if (type != JITCode::BaselineJIT) {
        dataLog(*this, ": expected to have baseline code but have ", type, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    // Sanity check: a successful compilation must have installed a
    // replacement distinct from this block, and any other result must not have.
    CodeBlock* theReplacement = replacement();
    if ((result == CompilationSuccessful) != (theReplacement != this)) {
        dataLog(*this, ": we have result = ", result, " but ");
        if (theReplacement == this)
            dataLog("we are our own replacement.\n");
        else
            dataLog("our replacement is ", pointerDump(theReplacement), "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    switch (result) {
    case CompilationSuccessful:
        RELEASE_ASSERT(JITCode::isOptimizingJIT(replacement()->jitType()));
        // Optimized code is installed; switch to it on the next invocation.
        optimizeNextInvocation();
        return;
    case CompilationFailed:
        // Defer indefinitely; retrying is not expected to help.
        dontOptimizeAnytimeSoon();
        return;
    case CompilationDeferred:
        // We'd like to do dontOptimizeAnytimeSoon() but we cannot because
        // forceOptimizationSlowPathConcurrently() is inherently racy. It won't
        // necessarily guarantee anything. So, we make sure that even if that
        // function ends up being a no-op, we still eventually retry and realize
        // that we have optimized code ready.
        optimizeAfterWarmUp();
        return;
    case CompilationInvalidated:
        // Retry with exponential backoff.
        countReoptimization();
        optimizeAfterWarmUp();
        return;
    }

    // All enum values are handled above; reaching here means a corrupt result.
    dataLog("Unrecognized result: ", static_cast<int>(result), "\n");
    RELEASE_ASSERT_NOT_REACHED();
}
3140
3141#endif
3142
3143uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
3144{
3145    ASSERT(JITCode::isOptimizingJIT(jitType()));
3146    // Compute this the lame way so we don't saturate. This is called infrequently
3147    // enough that this loop won't hurt us.
3148    unsigned result = desiredThreshold;
3149    for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) {
3150        unsigned newResult = result << 1;
3151        if (newResult < result)
3152            return std::numeric_limits<uint32_t>::max();
3153        result = newResult;
3154    }
3155    return result;
3156}
3157
uint32_t CodeBlock::exitCountThresholdForReoptimization()
{
    // Base OSR exit budget scaled by the code-type multiplier, then doubled
    // per prior reoptimization (see adjustedExitCountThreshold).
    return adjustedExitCountThreshold(Options::osrExitCountForReoptimization() * codeTypeThresholdMultiplier());
}
3162
uint32_t CodeBlock::exitCountThresholdForReoptimizationFromLoop()
{
    // Same as exitCountThresholdForReoptimization(), but using the separate
    // budget option for exits taken from within loops.
    return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop() * codeTypeThresholdMultiplier());
}
3167
bool CodeBlock::shouldReoptimizeNow()
{
    // True once accumulated OSR exits reach the backoff-adjusted threshold.
    return osrExitCounter() >= exitCountThresholdForReoptimization();
}
3172
bool CodeBlock::shouldReoptimizeFromLoopNow()
{
    // Loop variant of shouldReoptimizeNow(), using the loop exit budget.
    return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop();
}
3177#endif
3178
3179ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
3180{
3181    for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
3182        if (m_arrayProfiles[i].bytecodeOffset() == bytecodeOffset)
3183            return &m_arrayProfiles[i];
3184    }
3185    return 0;
3186}
3187
3188ArrayProfile* CodeBlock::getOrAddArrayProfile(unsigned bytecodeOffset)
3189{
3190    ArrayProfile* result = getArrayProfile(bytecodeOffset);
3191    if (result)
3192        return result;
3193    return addArrayProfile(bytecodeOffset);
3194}
3195
// Recomputes the predicted type of every value profile (argument profiles and
// bytecode-site profiles) under the concurrent-JIT lock, and reports:
//  - numberOfLiveNonArgumentValueProfiles: bytecode-site profiles that have
//    seen samples or already carry a non-empty prediction;
//  - numberOfSamplesInProfiles: total samples, with each profile's
//    contribution capped at ValueProfile::numberOfBuckets.
void CodeBlock::updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles)
{
    ConcurrentJITLocker locker(m_lock);

    numberOfLiveNonArgumentValueProfiles = 0;
    numberOfSamplesInProfiles = 0; // If this divided by ValueProfile::numberOfBuckets equals numberOfValueProfiles() then value profiles are full.
    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
        ValueProfile* profile = getFromAllValueProfiles(i);
        unsigned numSamples = profile->totalNumberOfSamples();
        if (numSamples > ValueProfile::numberOfBuckets)
            numSamples = ValueProfile::numberOfBuckets; // We don't want profiles that are extremely hot to be given more weight.
        numberOfSamplesInProfiles += numSamples;
        if (profile->m_bytecodeOffset < 0) {
            // Negative bytecode offsets mark argument profiles; update them
            // but exclude them from the liveness count.
            profile->computeUpdatedPrediction(locker);
            continue;
        }
        if (profile->numberOfSamples() || profile->m_prediction != SpecNone)
            numberOfLiveNonArgumentValueProfiles++;
        profile->computeUpdatedPrediction(locker);
    }

#if ENABLE(DFG_JIT)
    m_lazyOperandValueProfiles.computeUpdatedPredictions(locker);
#endif
}
3221
3222void CodeBlock::updateAllValueProfilePredictions()
3223{
3224    unsigned ignoredValue1, ignoredValue2;
3225    updateAllPredictionsAndCountLiveness(ignoredValue1, ignoredValue2);
3226}
3227
3228void CodeBlock::updateAllArrayPredictions()
3229{
3230    ConcurrentJITLocker locker(m_lock);
3231
3232    for (unsigned i = m_arrayProfiles.size(); i--;)
3233        m_arrayProfiles[i].computeUpdatedPrediction(locker, this);
3234
3235    // Don't count these either, for similar reasons.
3236    for (unsigned i = m_arrayAllocationProfiles.size(); i--;)
3237        m_arrayAllocationProfiles[i].updateIndexingType();
3238}
3239
void CodeBlock::updateAllPredictions()
{
    // Refresh value profiles, then array profiles; each helper takes m_lock
    // internally.
    updateAllValueProfilePredictions();
    updateAllArrayPredictions();
}
3245
// Decides whether profiling data is mature enough to trigger an optimizing
// compile now; if not, bumps the delay counter and re-arms the warm-up
// threshold.
bool CodeBlock::shouldOptimizeNow()
{
    if (Options::verboseOSR())
        dataLog("Considering optimizing ", *this, "...\n");

    // After enough delays, optimize regardless of profile maturity.
    if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
        return true;

    updateAllArrayPredictions();

    unsigned numberOfLiveNonArgumentValueProfiles;
    unsigned numberOfSamplesInProfiles;
    updateAllPredictionsAndCountLiveness(numberOfLiveNonArgumentValueProfiles, numberOfSamplesInProfiles);

    if (Options::verboseOSR()) {
        // NOTE(review): this log computes the fullness ratio against
        // numberOfValueProfiles(), while the decision below divides by
        // totalNumberOfValueProfiles() — confirm whether the discrepancy is
        // intentional.
        dataLogF(
            "Profile hotness: %lf (%u / %u), %lf (%u / %u)\n",
            (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(),
            numberOfLiveNonArgumentValueProfiles, numberOfValueProfiles(),
            (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles(),
            numberOfSamplesInProfiles, ValueProfile::numberOfBuckets * numberOfValueProfiles());
    }

    // Optimize only if the profiles are live enough, full enough, and the
    // minimum delay has elapsed. Empty denominators count as satisfied.
    if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate())
        && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate())
        && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay())
        return true;

    // Not ready yet: record the delay and come back after another warm-up.
    ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max());
    m_optimizationDelayCounter++;
    optimizeAfterWarmUp();
    return false;
}
3279
3280#if ENABLE(DFG_JIT)
3281void CodeBlock::tallyFrequentExitSites()
3282{
3283    ASSERT(JITCode::isOptimizingJIT(jitType()));
3284    ASSERT(alternative()->jitType() == JITCode::BaselineJIT);
3285
3286    CodeBlock* profiledBlock = alternative();
3287
3288    switch (jitType()) {
3289    case JITCode::DFGJIT: {
3290        DFG::JITCode* jitCode = m_jitCode->dfg();
3291        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3292            DFG::OSRExit& exit = jitCode->osrExit[i];
3293
3294            if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
3295                continue;
3296        }
3297        break;
3298    }
3299
3300#if ENABLE(FTL_JIT)
3301    case JITCode::FTLJIT: {
3302        // There is no easy way to avoid duplicating this code since the FTL::JITCode::osrExit
3303        // vector contains a totally different type, that just so happens to behave like
3304        // DFG::JITCode::osrExit.
3305        FTL::JITCode* jitCode = m_jitCode->ftl();
3306        for (unsigned i = 0; i < jitCode->osrExit.size(); ++i) {
3307            FTL::OSRExit& exit = jitCode->osrExit[i];
3308
3309            if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
3310                continue;
3311        }
3312        break;
3313    }
3314#endif
3315
3316    default:
3317        RELEASE_ASSERT_NOT_REACHED();
3318        break;
3319    }
3320}
3321#endif // ENABLE(DFG_JIT)
3322
3323#if ENABLE(VERBOSE_VALUE_PROFILE)
3324void CodeBlock::dumpValueProfiles()
3325{
3326    dataLog("ValueProfile for ", *this, ":\n");
3327    for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
3328        ValueProfile* profile = getFromAllValueProfiles(i);
3329        if (profile->m_bytecodeOffset < 0) {
3330            ASSERT(profile->m_bytecodeOffset == -1);
3331            dataLogF("   arg = %u: ", i);
3332        } else
3333            dataLogF("   bc = %d: ", profile->m_bytecodeOffset);
3334        if (!profile->numberOfSamples() && profile->m_prediction == SpecNone) {
3335            dataLogF("<empty>\n");
3336            continue;
3337        }
3338        profile->dump(WTF::dataFile());
3339        dataLogF("\n");
3340    }
3341    dataLog("RareCaseProfile for ", *this, ":\n");
3342    for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
3343        RareCaseProfile* profile = rareCaseProfile(i);
3344        dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
3345    }
3346    dataLog("SpecialFastCaseProfile for ", *this, ":\n");
3347    for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
3348        RareCaseProfile* profile = specialFastCaseProfile(i);
3349        dataLogF("   bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
3350    }
3351}
3352#endif // ENABLE(VERBOSE_VALUE_PROFILE)
3353
// Number of registers this code block's frame needs, as determined by
// whichever execution tier currently backs it.
unsigned CodeBlock::frameRegisterCount()
{
    switch (jitType()) {
    case JITCode::InterpreterThunk:
        return LLInt::frameRegisterCountFor(this);

#if ENABLE(JIT)
    case JITCode::BaselineJIT:
        return JIT::frameRegisterCountFor(this);
#endif // ENABLE(JIT)

#if ENABLE(DFG_JIT)
    case JITCode::DFGJIT:
    case JITCode::FTLJIT:
        // DFG and FTL record the frame size in the common JIT code data.
        return jitCode()->dfgCommon()->frameRegisterCount;
#endif // ENABLE(DFG_JIT)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return 0;
    }
}
3376
3377int CodeBlock::stackPointerOffset()
3378{
3379    return virtualRegisterForLocal(frameRegisterCount() - 1).offset();
3380}
3381
3382size_t CodeBlock::predictedMachineCodeSize()
3383{
3384    // This will be called from CodeBlock::CodeBlock before either m_vm or the
3385    // instructions have been initialized. It's OK to return 0 because what will really
3386    // matter is the recomputation of this value when the slow path is triggered.
3387    if (!m_vm)
3388        return 0;
3389
3390    if (!m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT)
3391        return 0; // It's as good of a prediction as we'll get.
3392
3393    // Be conservative: return a size that will be an overestimation 84% of the time.
3394    double multiplier = m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.mean() +
3395        m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.standardDeviation();
3396
3397    // Be paranoid: silently reject bogus multipiers. Silently doing the "wrong" thing
3398    // here is OK, since this whole method is just a heuristic.
3399    if (multiplier < 0 || multiplier > 1000)
3400        return 0;
3401
3402    double doubleResult = multiplier * m_instructions.size();
3403
3404    // Be even more paranoid: silently reject values that won't fit into a size_t. If
3405    // the function is so huge that we can't even fit it into virtual memory then we
3406    // should probably have some other guards in place to prevent us from even getting
3407    // to this point.
3408    if (doubleResult > std::numeric_limits<size_t>::max())
3409        return 0;
3410
3411    return static_cast<size_t>(doubleResult);
3412}
3413
// Linearly scans the instruction stream, stepping by each opcode's length,
// looking for any occurrence of the requested opcode.
bool CodeBlock::usesOpcode(OpcodeID opcodeID)
{
    Interpreter* interpreter = vm()->interpreter;
    Instruction* instructionsBegin = instructions().begin();
    unsigned instructionCount = instructions().size();

    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
        switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
        // Expand one case per opcode: match against the target, otherwise
        // advance past this instruction's operands.
#define DEFINE_OP(curOpcode, length)        \
        case curOpcode:                     \
            if (curOpcode == opcodeID)      \
                return true;                \
            bytecodeOffset += length;       \
            break;
            FOR_EACH_OPCODE_ID(DEFINE_OP)
#undef DEFINE_OP
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    return false;
}
3438
// Best-effort, debug-oriented mapping from a virtual register to a
// human-readable name: named variables from the symbol table first, then
// well-known special registers, then a generic argument label; returns the
// empty string when nothing matches.
String CodeBlock::nameForRegister(VirtualRegister virtualRegister)
{
    ConcurrentJITLocker locker(symbolTable()->m_lock);
    SymbolTable::Map::iterator end = symbolTable()->end(locker);
    for (SymbolTable::Map::iterator ptr = symbolTable()->begin(locker); ptr != end; ++ptr) {
        if (ptr->value.getIndex() == virtualRegister.offset()) {
            // FIXME: This won't work from the compilation thread.
            // https://bugs.webkit.org/show_bug.cgi?id=115300
            return String(ptr->key);
        }
    }
    if (needsActivation() && virtualRegister == activationRegister())
        return ASCIILiteral("activation");
    if (virtualRegister == thisRegister())
        return ASCIILiteral("this");
    if (usesArguments()) {
        if (virtualRegister == argumentsRegister())
            return ASCIILiteral("arguments");
        if (unmodifiedArgumentsRegister(argumentsRegister()) == virtualRegister)
            return ASCIILiteral("real arguments");
    }
    if (virtualRegister.isArgument())
        return String::format("arguments[%3d]", virtualRegister.toArgument()).impl();

    return "";
}
3465
3466namespace {
3467
3468struct VerifyCapturedDef {
3469    void operator()(CodeBlock* codeBlock, Instruction* instruction, OpcodeID opcodeID, int operand)
3470    {
3471        unsigned bytecodeOffset = instruction - codeBlock->instructions().begin();
3472
3473        if (codeBlock->isConstantRegisterIndex(operand)) {
3474            codeBlock->beginValidationDidFail();
3475            dataLog("    At bc#", bytecodeOffset, " encountered a definition of a constant.\n");
3476            codeBlock->endValidationDidFail();
3477            return;
3478        }
3479
3480        switch (opcodeID) {
3481        case op_enter:
3482        case op_captured_mov:
3483        case op_init_lazy_reg:
3484        case op_create_arguments:
3485        case op_new_captured_func:
3486            return;
3487        default:
3488            break;
3489        }
3490
3491        VirtualRegister virtualReg(operand);
3492        if (!virtualReg.isLocal())
3493            return;
3494
3495        if (codeBlock->captureCount() && codeBlock->symbolTable()->isCaptured(operand)) {
3496            codeBlock->beginValidationDidFail();
3497            dataLog("    At bc#", bytecodeOffset, " encountered invalid assignment to captured variable loc", virtualReg.toLocal(), ".\n");
3498            codeBlock->endValidationDidFail();
3499            return;
3500        }
3501
3502        return;
3503    }
3504};
3505
3506} // anonymous namespace
3507
// Consistency checks over the bytecode: verifies the liveness analysis has
// the expected shape at bytecode offset 0 and that no instruction illegally
// defines a constant or captured register. Failures dump state and crash.
void CodeBlock::validate()
{
    BytecodeLivenessAnalysis liveness(this); // Compute directly from scratch so it doesn't effect CodeBlock footprint.

    FastBitVector liveAtHead = liveness.getLivenessInfoAtBytecodeOffset(0);

    // The liveness bitvector must carry exactly one bit per callee register.
    if (liveAtHead.numBits() != static_cast<size_t>(m_numCalleeRegisters)) {
        beginValidationDidFail();
        dataLog("    Wrong number of bits in result!\n");
        dataLog("    Result: ", liveAtHead, "\n");
        dataLog("    Bit count: ", liveAtHead.numBits(), "\n");
        endValidationDidFail();
    }

    // At bytecode offset 0, captured locals must be live and all other
    // locals dead.
    for (unsigned i = m_numCalleeRegisters; i--;) {
        bool isCaptured = false;
        VirtualRegister reg = virtualRegisterForLocal(i);

        // A local is captured when its offset lies in (captureEnd(), captureStart()].
        if (captureCount())
            isCaptured = reg.offset() <= captureStart() && reg.offset() > captureEnd();

        if (isCaptured) {
            if (!liveAtHead.get(i)) {
                beginValidationDidFail();
                dataLog("    Variable loc", i, " is expected to be live because it is captured, but it isn't live.\n");
                dataLog("    Result: ", liveAtHead, "\n");
                endValidationDidFail();
            }
        } else {
            if (liveAtHead.get(i)) {
                beginValidationDidFail();
                dataLog("    Variable loc", i, " is expected to be dead.\n");
                dataLog("    Result: ", liveAtHead, "\n");
                endValidationDidFail();
            }
        }
    }

    // Run VerifyCapturedDef over the defs of every instruction.
    for (unsigned bytecodeOffset = 0; bytecodeOffset < instructions().size();) {
        Instruction* currentInstruction = instructions().begin() + bytecodeOffset;
        OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);

        VerifyCapturedDef verifyCapturedDef;
        computeDefsForBytecodeOffset(this, bytecodeOffset, verifyCapturedDef);

        bytecodeOffset += opcodeLength(opcodeID);
    }
}
3556
void CodeBlock::beginValidationDidFail()
{
    // Prints the failure banner; the caller follows with detail lines and
    // then calls endValidationDidFail() to dump the bytecode and crash.
    dataLog("Validation failure in ", *this, ":\n");
    dataLog("\n");
}
3562
void CodeBlock::endValidationDidFail()
{
    // Dump the full bytecode for post-mortem debugging, then crash; a failed
    // validation must never continue executing.
    dataLog("\n");
    dumpBytecode();
    dataLog("\n");
    dataLog("Validation failure.\n");
    RELEASE_ASSERT_NOT_REACHED();
}
3571
void CodeBlock::addBreakpoint(unsigned numBreakpoints)
{
    // Track how many debugger breakpoints live in this block; if optimized
    // code is installed, throw it away on account of the breakpoint.
    m_numBreakpoints += numBreakpoints;
    ASSERT(m_numBreakpoints);
    if (JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerBreakpoint);
}
3579
void CodeBlock::setSteppingMode(CodeBlock::SteppingMode mode)
{
    // Enabling debugger stepping jettisons any installed optimized code.
    m_steppingMode = mode;
    if (mode == SteppingModeEnabled && JITCode::isOptimizingJIT(jitType()))
        jettison(Profiler::JettisonDueToDebuggerStepping);
}
3586
RareCaseProfile* CodeBlock::rareCaseProfileForBytecodeOffset(int bytecodeOffset)
{
    // Binary search over m_rareCaseProfiles keyed by bytecode offset
    // (presumably kept sorted by construction — the "try" variant returns
    // null when no profile exists for the offset).
    return tryBinarySearch<RareCaseProfile, int>(
        m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
        getRareCaseProfileBytecodeOffset);
}
3593
3594#if ENABLE(JIT)
DFG::CapabilityLevel CodeBlock::capabilityLevel()
{
    // Recompute the DFG capability level and cache it in
    // m_capabilityLevelState before returning it.
    DFG::CapabilityLevel result = capabilityLevelInternal();
    m_capabilityLevelState = result;
    return result;
}
3601#endif
3602
3603} // namespace JSC
3604