# Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.

# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp

# Work-around for the fact that the toolchain's awareness of armv7s results in
# a separate slab in the fat binary, yet the offlineasm doesn't know to expect
# it.
if ARMv7s
end

# These declarations must match interpreter/JSStack.h.

if JSVALUE64
const PtrSize = 8
const CallFrameHeaderSlots = 6
else
const PtrSize = 4
const CallFrameHeaderSlots = 5
const CallFrameAlignSlots = 1
end
const SlotSize = 8

const CallerFrameAndPCSize = 2 * PtrSize

const CallerFrame = 0
const ReturnPC = CallerFrame + PtrSize
const CodeBlock = ReturnPC + PtrSize
const ScopeChain = CodeBlock + SlotSize
const Callee = ScopeChain + SlotSize
const ArgumentCount = Callee + SlotSize
const ThisArgumentOffset = ArgumentCount + SlotSize
const CallFrameHeaderSize = ThisArgumentOffset
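# As a worked example: with JSVALUE64 (PtrSize == 8) the header offsets come
# out to CallerFrame = 0, ReturnPC = 8, CodeBlock = 16, ScopeChain = 24,
# Callee = 32, ArgumentCount = 40, and ThisArgumentOffset = 48, i.e. six
# 8-byte header slots, matching CallFrameHeaderSlots above.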

# Some value representation constants.
if JSVALUE64
const TagBitTypeOther = 0x2
const TagBitBool      = 0x4
const TagBitUndefined = 0x8
const ValueEmpty      = 0x0
const ValueFalse      = TagBitTypeOther | TagBitBool
const ValueTrue       = TagBitTypeOther | TagBitBool | 1
const ValueUndefined  = TagBitTypeOther | TagBitUndefined
const ValueNull       = TagBitTypeOther
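# Working the ORs through: ValueFalse == 0x6, ValueTrue == 0x7,
# ValueUndefined == 0xa, and ValueNull == 0x2. All four immediates have
# TagBitTypeOther set, a bit that is always clear in a cell pointer because
# cells are at least 8-byte aligned.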
else
const Int32Tag = -1
const BooleanTag = -2
const NullTag = -3
const UndefinedTag = -4
const CellTag = -5
const EmptyValueTag = -6
const DeletedValueTag = -7
const LowestTag = DeletedValueTag
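# In the 32-bit encoding a JSValue is a (tag, payload) pair of 32-bit words.
# The tags are small negative integers, so as unsigned values they sit at the
# very top of the 32-bit range; a word whose tag is numerically below
# LowestTag is instead the high half of an IEEE double (NaNs are normalized
# elsewhere so that real doubles never collide with these tags).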
end

const CallOpCodeSize = 9

if X86_64 or ARM64 or C_LOOP
const maxFrameExtentForSlowPathCall = 0
elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
const maxFrameExtentForSlowPathCall = 24
elsif X86 or X86_WIN
const maxFrameExtentForSlowPathCall = 40
elsif MIPS
const maxFrameExtentForSlowPathCall = 40
elsif X86_64_WIN
const maxFrameExtentForSlowPathCall = 64
end
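# maxFrameExtentForSlowPathCall is the scratch space kept below the frame for
# outgoing arguments when calling a slow-path C function. It is zero on ABIs
# that pass all slow-path arguments in registers, and must stay in sync with
# the matching C++ constant of the same name for each target.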

# Watchpoint states
const ClearWatchpoint = 0
const IsWatched = 1
const IsInvalidated = 2

# Some register conventions.
if JSVALUE64
    # - Use a pair of registers to represent the PC: one register for the
    #   base of the bytecodes, and one register for the index.
    # - The PC base (or PB for short) should be stored in the csr. It will
    #   get clobbered on calls to other JS code, but will get saved on calls
    #   to C functions.
    # - C calls are still given the Instruction* rather than the PC index.
    #   This requires an add before the call, and a sub after.
    const PC = t5
    const PB = t6
    const tagTypeNumber = csr1
    const tagMask = csr2

    macro loadisFromInstruction(offset, dest)
        loadis offset * 8[PB, PC, 8], dest
    end

    macro loadpFromInstruction(offset, dest)
        loadp offset * 8[PB, PC, 8], dest
    end

    macro storepToInstruction(value, offset)
        storep value, offset * 8[PB, PC, 8]
    end

else
    const PC = t5
    macro loadisFromInstruction(offset, dest)
        loadis offset * 4[PC], dest
    end

    macro loadpFromInstruction(offset, dest)
        loadp offset * 4[PC], dest
    end
end
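# In both builds the *FromInstruction macros address the bytecode stream.
# With JSVALUE64 the operand address is PB + (PC + offset) * 8: PB holds the
# base of the Instruction stream and PC is a slot index. In the 32-bit build
# PC holds the Instruction* directly, so the address is just PC + offset * 4.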

# Constants for reasoning about value representation.
if BIG_ENDIAN
    const TagOffset = 0
    const PayloadOffset = 4
else
    const TagOffset = 4
    const PayloadOffset = 0
end

# Constants for reasoning about butterflies.
const IsArray                  = 1
const IndexingShapeMask        = 30
const NoIndexingShape          = 0
const Int32Shape               = 20
const DoubleShape              = 22
const ContiguousShape          = 26
const ArrayStorageShape        = 28
const SlowPutArrayStorageShape = 30
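# IndexingShapeMask is 0b11110: the shape lives in bits 1-4 of the indexing
# type byte and bit 0 is the IsArray flag. For example, a JSArray backed by
# contiguous storage has indexingType == IsArray | ContiguousShape.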

# Type constants.
const StringType = 5
const ObjectType = 18
const FinalObjectType = 19

# Type flags constants.
const MasqueradesAsUndefined = 1
const ImplementsHasInstance = 2
const ImplementsDefaultHasInstance = 8

# Bytecode operand constants.
const FirstConstantRegisterIndex = 0x40000000

# Code type constants.
const GlobalCode = 0
const EvalCode = 1
const FunctionCode = 2

# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset

# String flags.
const HashFlags8BitBuffer = 32

# Copied from PropertyOffset.h
const firstOutOfLineOffset = 100

# ResolveType
const GlobalProperty = 0
const GlobalVar = 1
const ClosureVar = 2
const GlobalPropertyWithVarInjectionChecks = 3
const GlobalVarWithVarInjectionChecks = 4
const ClosureVarWithVarInjectionChecks = 5
const Dynamic = 6

const ResolveModeMask = 0xffff

const MarkedBlockSize = 64 * 1024
const MarkedBlockMask = ~(MarkedBlockSize - 1)
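# Because MarkedBlockSize is a power of two, masking any cell pointer with
# MarkedBlockMask rounds it down to the 64KB-aligned header of the
# MarkedBlock containing it; the shifts below then index into that block's
# mark bitmap.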
# Constants for checking mark bits.
const AtomNumberShift = 3
const BitMapWordShift = 4

# Allocation constants
if JSVALUE64
    const JSFinalObjectSizeClassIndex = 1
else
    const JSFinalObjectSizeClassIndex = 3
end

# This must match wtf/Vector.h
const VectorBufferOffset = 0
if JSVALUE64
    const VectorSizeOffset = 12
else
    const VectorSizeOffset = 8
end

# Some common utilities.
macro crash()
    if C_LOOP
        cloopCrash
    else
        call _llint_crash
    end
end

macro assert(assertion)
    if ASSERT_ENABLED
        assertion(.ok)
        crash()
    .ok:
    end
end
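# assert() takes a macro that receives a success label: the macro must branch
# to that label when the asserted condition holds, and fall through otherwise,
# in which case we crash. For example, assert(macro (ok) bpgteq t0, 0, ok end)
# crashes when t0 is negative. It compiles to nothing unless ASSERT_ENABLED.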

macro checkStackPointerAlignment(tempReg, location)
    if ARM64 or C_LOOP or SH4
        # ARM64 will check for us!
        # C_LOOP does not need the alignment, and can use a little perf
        # improvement from avoiding useless work.
        # SH4 does not need specific alignment (4 bytes).
    else
        if ARM or ARMv7 or ARMv7_TRADITIONAL
            # ARM can't do logical ops with the sp as a source
            move sp, tempReg
            andp 0xf, tempReg
        else
            andp sp, 0xf, tempReg
        end
        btpz tempReg, .stackPointerOkay
        move location, tempReg
        break
    .stackPointerOkay:
    end
end

macro preserveCallerPCAndCFR()
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        push lr
        push cfr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        push cfr
    elsif ARM64
        pushLRAndFP
    else
        error
    end
    move sp, cfr
end

macro restoreCallerPCAndCFR()
    move cfr, sp
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        pop cfr
        pop lr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        pop cfr
    elsif ARM64
        popLRAndFP
    end
end

macro preserveReturnAddressAfterCall(destinationRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In C_LOOP case, we're only preserving the bytecode vPC.
        move lr, destinationRegister
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        pop destinationRegister
    else
        error
    end
end

macro restoreReturnAddressBeforeReturn(sourceRegister)
    if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
        # In C_LOOP case, we're only restoring the bytecode vPC.
        move sourceRegister, lr
    elsif X86 or X86_WIN or X86_64 or X86_64_WIN
        push sourceRegister
    else
        error
    end
end

macro functionPrologue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
        push cfr
    elsif ARM64
        pushLRAndFP
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        push lr
        push cfr
    end
    move sp, cfr
end

macro functionEpilogue()
    if X86 or X86_WIN or X86_64 or X86_64_WIN
        pop cfr
    elsif ARM64
        popLRAndFP
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        pop cfr
        pop lr
    end
end

macro callToJavaScriptPrologue()
    if X86_64 or X86_64_WIN
        push cfr
        push t0
    elsif X86 or X86_WIN
        push cfr
    elsif ARM64
        pushLRAndFP
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        push lr
        push cfr
    end
    pushCalleeSaves
    if X86
        subp 12, sp
    elsif X86_WIN
        subp 16, sp
        move sp, t4
        move t4, t0
        move t4, t2
        andp 0xf, t2
        andp 0xfffffff0, t0
        move t0, sp
        storep t4, [sp]
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
        subp 4, sp
        move sp, t4
        clrbp t4, 0xf, t5
        move t5, sp
        storep t4, [sp]
    end
end

macro callToJavaScriptEpilogue()
    if ARMv7
        addp CallFrameHeaderSlots * 8, cfr, t4
        move t4, sp
    else
        addp CallFrameHeaderSlots * 8, cfr, sp
    end

    loadp CallerFrame[cfr], cfr

    if X86
        addp 12, sp
    elsif X86_WIN
        pop t4
        move t4, sp
        addp 16, sp
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
        pop t4
        move t4, sp
        addp 4, sp
    end

    popCalleeSaves
    if X86_64 or X86_64_WIN
        pop t2
        pop cfr
    elsif X86 or X86_WIN
        pop cfr
    elsif ARM64
        popLRAndFP
    elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        pop cfr
        pop lr
    end
end

macro moveStackPointerForCodeBlock(codeBlock, scratch)
    loadi CodeBlock::m_numCalleeRegisters[codeBlock], scratch
    lshiftp 3, scratch
    addp maxFrameExtentForSlowPathCall, scratch
    if ARMv7
        subp cfr, scratch, scratch
        move scratch, sp
    else
        subp cfr, scratch, sp
    end
end
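# In other words: sp = cfr - (m_numCalleeRegisters * 8 +
# maxFrameExtentForSlowPathCall). ARMv7 goes through a scratch register
# because Thumb-2 restricts using sp as the destination of three-operand
# arithmetic like this.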

macro restoreStackPointerAfterCall()
    loadp CodeBlock[cfr], t2
    moveStackPointerForCodeBlock(t2, t4)
end

macro traceExecution()
    if EXECUTION_TRACING
        callSlowPath(_llint_trace)
    end
end

macro callTargetFunction(callLinkInfo, calleeFramePtr)
    move calleeFramePtr, sp
    if C_LOOP
        cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    else
        call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
    end
    restoreStackPointerAfterCall()
    dispatchAfterCall()
end

macro slowPathForCall(slowPath)
    callCallSlowPath(
        slowPath,
        macro (callee)
            btpz t1, .dontUpdateSP
            if ARMv7
                addp CallerFrameAndPCSize, t1, t1
                move t1, sp
            else
                addp CallerFrameAndPCSize, t1, sp
            end
        .dontUpdateSP:
            if C_LOOP
                cloopCallJSFunction callee
            else
                call callee
            end
            restoreStackPointerAfterCall()
            dispatchAfterCall()
        end)
end

macro arrayProfile(cellAndIndexingType, profile, scratch)
    const cell = cellAndIndexingType
    const indexingType = cellAndIndexingType
    loadi JSCell::m_structureID[cell], scratch
    storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
    loadb JSCell::m_indexingType[cell], indexingType
end
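# Note the aliasing above: cellAndIndexingType names a single register that
# holds the cell on entry and is overwritten with the cell's indexing type
# byte on exit, after the structure ID has been recorded in the profile.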

macro checkMarkByte(cell, scratch1, scratch2, continuation)
    loadb JSCell::m_gcData[cell], scratch1
    continuation(scratch1)
end

macro checkSwitchToJIT(increment, action)
    loadp CodeBlock[cfr], t0
    baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue
    action()
    .continue:
end

macro checkSwitchToJITForEpilogue()
    checkSwitchToJIT(
        10,
        macro ()
            callSlowPath(_llint_replace)
        end)
end

macro assertNotConstant(index)
    assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
end

macro functionForCallCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
end

macro functionForConstructCodeBlockGetter(targetRegister)
    loadp Callee[cfr], targetRegister
    loadp JSFunction::m_executable[targetRegister], targetRegister
    loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
end

macro notFunctionCodeBlockGetter(targetRegister)
    loadp CodeBlock[cfr], targetRegister
end

macro functionCodeBlockSetter(sourceRegister)
    storep sourceRegister, CodeBlock[cfr]
end

macro notFunctionCodeBlockSetter(sourceRegister)
    # Nothing to do!
end
# Does the bare minimum required to execute code: sets up the PC and leaves
# the CodeBlock* in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
    # Set up the call frame and check if we should OSR.
    preserveCallerPCAndCFR()

    if EXECUTION_TRACING
        subp maxFrameExtentForSlowPathCall, sp
        callSlowPath(traceSlowPath)
        addp maxFrameExtentForSlowPathCall, sp
    end
    codeBlockGetter(t1)
if C_LOOP
else
    baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
    if JSVALUE64
        cCall2(osrSlowPath, cfr, PC)
    else
        # We are after the function prologue, but before we have set up sp from the CodeBlock.
        # Temporarily align stack pointer for this call.
        subp 8, sp
        cCall2(osrSlowPath, cfr, PC)
        addp 8, sp
    end
    btpz t0, .recover
    move cfr, sp # restore the previous sp
    # pop the callerFrame since we will jump to a function that wants to save it
    if ARM64
        popLRAndFP
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
        pop cfr
        pop lr
    else
        pop cfr
    end
    jmp t0
.recover:
    codeBlockGetter(t1)
.continue:
end

    codeBlockSetter(t1)

    moveStackPointerForCodeBlock(t1, t2)

    # Set up the PC.
    if JSVALUE64
        loadp CodeBlock::m_instructions[t1], PB
        move 0, PC
    else
        loadp CodeBlock::m_instructions[t1], PC
    end
end

# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
# Must call dispatch(0) after calling this.
macro functionInitialization(profileArgSkip)
    # Profile the arguments. Unfortunately, we have no choice but to do this. This
    # code is pretty horrendous because of the difference in ordering between
    # arguments and value profiles, the desire to have a simple loop-down-to-zero
    # loop, and the desire to use only three registers so as to preserve the PC and
    # the code block. It is likely that this code should be rewritten in a more
    # optimal way for architectures that have more than five registers available
    # for arbitrary use in the interpreter.
    loadi CodeBlock::m_numParameters[t1], t0
    addp -profileArgSkip, t0 # Add the negative skip count; the add form is what has the peephole optimization.
    assert(macro (ok) bpgteq t0, 0, ok end)
    btpz t0, .argumentProfileDone
    loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
    mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
    lshiftp 3, t0
    addp t2, t3
.argumentProfileLoop:
    if JSVALUE64
        loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        subp sizeof ValueProfile, t3
        storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
    else
        loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        subp sizeof ValueProfile, t3
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
        loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
        storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
    end
    baddpnz -8, t0, .argumentProfileLoop
.argumentProfileDone:

    # Check stack height.
    loadi CodeBlock::m_numCalleeRegisters[t1], t0
    loadp CodeBlock::m_vm[t1], t2
    lshiftp 3, t0
    addi maxFrameExtentForSlowPathCall, t0
    subp cfr, t0, t0
    bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK

    # Stack height check failed - need to call a slow_path.
    callSlowPath(_llint_stack_check)
    bpeq t1, 0, .stackHeightOK
    move t1, cfr
.stackHeightOK:
end

macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
    if ALWAYS_ALLOCATE_SLOW
        jmp slowCase
    else
        const offsetOfFirstFreeCell =
            MarkedAllocator::m_freeList +
            MarkedBlock::FreeList::head

        # Get the object from the free list.
        loadp offsetOfFirstFreeCell[allocator], result
        btpz result, slowCase

        # Remove the object from the free list.
        loadp [result], scratch1
        storep scratch1, offsetOfFirstFreeCell[allocator]

        # Initialize the object.
        storep 0, JSObject::m_butterfly[result]
        storeStructureWithTypeInfo(result, structure, scratch1)
    end
end
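# allocateJSObject's fast path is a plain free-list pop. A rough C sketch,
# using the field names referenced above (the next-cell link is stored in the
# first word of each free cell):
#
#   JSCell* result = allocator->m_freeList.head;     // loadp offsetOfFirstFreeCell
#   if (!result)
#       goto slowCase;                               // btpz result, slowCase
#   allocator->m_freeList.head = *(JSCell**)result;  // pop the list
#   result->m_butterfly = 0;                         // storep 0, JSObject::m_butterfly
#   // ... then storeStructureWithTypeInfo publishes the structure.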

macro doReturn()
    restoreCallerPCAndCFR()
    ret
end

# stub to call into JavaScript or Native functions
# EncodedJSValue callToJavaScript(void* code, ExecState** vmTopCallFrame, ProtoCallFrame* protoFrame)
# EncodedJSValue callToNativeFunction(void* code, ExecState** vmTopCallFrame, ProtoCallFrame* protoFrame)

if C_LOOP
_llint_call_to_javascript:
else
global _callToJavaScript
_callToJavaScript:
end
    doCallToJavaScript(makeJavaScriptCall)


if C_LOOP
_llint_call_to_native_function:
else
global _callToNativeFunction
_callToNativeFunction:
end
    doCallToJavaScript(makeHostFunctionCall)


if C_LOOP
else
# void sanitizeStackForVMImpl(VM* vm)
global _sanitizeStackForVMImpl
_sanitizeStackForVMImpl:
    if X86_64
        const vm = t4
        const address = t1
        const zeroValue = t0
    elsif X86_64_WIN
        const vm = t2
        const address = t1
        const zeroValue = t0
    elsif X86 or X86_WIN
        const vm = t2
        const address = t1
        const zeroValue = t0
    else
        const vm = a0
        const address = t1
        const zeroValue = t2
    end

    if X86 or X86_WIN
        loadp 4[sp], vm
    end

    loadp VM::m_lastStackTop[vm], address
    bpbeq sp, address, .zeroFillDone

    move 0, zeroValue
.zeroFillLoop:
    storep zeroValue, [address]
    addp PtrSize, address
    bpa sp, address, .zeroFillLoop

.zeroFillDone:
    move sp, address
    storep address, VM::m_lastStackTop[vm]
    ret
end
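# sanitizeStackForVMImpl zero-fills the stack between the deepest position
# the VM previously reached (VM::m_lastStackTop) and the current sp, so that
# stale values sitting below the live stack cannot later be mistaken for
# pointers, and then records sp as the new last stack top.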


if C_LOOP
# Dummy entry point the C Loop uses to initialize.
_llint_entry:
    crash()
else
macro initPCRelative(pcBase)
    if X86_64 or X86_64_WIN
        call _relativePCBase
    _relativePCBase:
        pop pcBase
    elsif X86 or X86_WIN
        call _relativePCBase
    _relativePCBase:
        pop pcBase
        loadp 20[sp], t4
    elsif ARM64
    elsif ARMv7
    _relativePCBase:
        move pc, pcBase
        subp 3, pcBase   # Need to back up the PC and set the Thumb2 bit
    elsif ARM or ARMv7_TRADITIONAL
    _relativePCBase:
        move pc, pcBase
        subp 8, pcBase
    elsif MIPS
        crash()  # Need to replace with any initialization steps needed to set up PC relative address calculation
    elsif SH4
        mova _relativePCBase, t0
        move t0, pcBase
        alignformova
    _relativePCBase:
    end
end
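# initPCRelative leaves the runtime address of the _relativePCBase label in
# pcBase. On x86 this uses the call/pop trick; on ARM it reads pc and backs
# out the pipeline offset (8 bytes in ARM mode; 3 in Thumb-2, which also
# leaves the Thumb bit set). setEntryAddress below uses this base to turn
# assembler labels into absolute entry-point addresses.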

macro setEntryAddress(index, label)
    if X86_64
        leap (label - _relativePCBase)[t1], t0
        move index, t2
        storep t0, [t4, t2, 8]
    elsif X86_64_WIN
        leap (label - _relativePCBase)[t1], t0
        move index, t4
        storep t0, [t2, t4, 8]
    elsif X86 or X86_WIN
        leap (label - _relativePCBase)[t1], t0
        move index, t2
        storep t0, [t4, t2, 4]
    elsif ARM64
        pcrtoaddr label, t1
        move index, t2
        storep t1, [a0, t2, 8]
    elsif ARM or ARMv7 or ARMv7_TRADITIONAL
        mvlbl (label - _relativePCBase), t2
        addp t2, t1, t2
        move index, t3
        storep t2, [a0, t3, 4]
    elsif SH4
        move (label - _relativePCBase), t2
        addp t2, t1, t2
        move index, t3
        storep t2, [a0, t3, 4]
        flushcp # Force constant pool flush to avoid "pcrel too far" link error.
    elsif MIPS
        crash()  # Need to replace with code to turn the label into an absolute address and save it at index
    end
end

global _llint_entry
# Entry point for the llint to initialize.
_llint_entry:
    functionPrologue()
    pushCalleeSaves
    initPCRelative(t1)

    # Include generated bytecode initialization file.
    include InitBytecodes

    popCalleeSaves
    functionEpilogue()
    ret
end

_llint_program_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


_llint_eval_prologue:
    prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
    dispatch(0)


_llint_function_for_call_prologue:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
    functionInitialization(0)
    dispatch(0)


_llint_function_for_construct_prologue:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
    functionInitialization(1)
    dispatch(0)


_llint_function_for_call_arity_check:
    prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
    functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
.functionForCallBegin:
    functionInitialization(0)
    dispatch(0)


_llint_function_for_construct_arity_check:
    prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
    functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
.functionForConstructBegin:
    functionInitialization(1)
    dispatch(0)


# Value-representation-specific code.
if JSVALUE64
    include LowLevelInterpreter64
else
    include LowLevelInterpreter32_64
end


# Value-representation-agnostic code.
_llint_op_touch_entry:
    traceExecution()
    callSlowPath(_slow_path_touch_entry)
    dispatch(1)


_llint_op_new_array:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array)
    dispatch(5)


_llint_op_new_array_with_size:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array_with_size)
    dispatch(4)


_llint_op_new_array_buffer:
    traceExecution()
    callSlowPath(_llint_slow_path_new_array_buffer)
    dispatch(5)


_llint_op_new_regexp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_regexp)
    dispatch(3)


_llint_op_less:
    traceExecution()
    callSlowPath(_slow_path_less)
    dispatch(4)


_llint_op_lesseq:
    traceExecution()
    callSlowPath(_slow_path_lesseq)
    dispatch(4)


_llint_op_greater:
    traceExecution()
    callSlowPath(_slow_path_greater)
    dispatch(4)


_llint_op_greatereq:
    traceExecution()
    callSlowPath(_slow_path_greatereq)
    dispatch(4)


_llint_op_mod:
    traceExecution()
    callSlowPath(_slow_path_mod)
    dispatch(4)


_llint_op_typeof:
    traceExecution()
    callSlowPath(_slow_path_typeof)
    dispatch(3)


_llint_op_is_object:
    traceExecution()
    callSlowPath(_slow_path_is_object)
    dispatch(3)


_llint_op_is_function:
    traceExecution()
    callSlowPath(_slow_path_is_function)
    dispatch(3)


_llint_op_in:
    traceExecution()
    callSlowPath(_slow_path_in)
    dispatch(4)

macro withInlineStorage(object, propertyStorage, continuation)
    # Indicate that the object is the property storage, and that the
    # property storage register is unused.
    continuation(object, propertyStorage)
end

macro withOutOfLineStorage(object, propertyStorage, continuation)
    loadp JSObject::m_butterfly[object], propertyStorage
    # Indicate that the propertyStorage register now points to the
    # property storage, and that the object register may be reused
    # if the object pointer is not needed anymore.
    continuation(propertyStorage, object)
end


_llint_op_del_by_id:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_id)
    dispatch(4)


_llint_op_del_by_val:
    traceExecution()
    callSlowPath(_llint_slow_path_del_by_val)
    dispatch(4)


_llint_op_put_by_index:
    traceExecution()
    callSlowPath(_llint_slow_path_put_by_index)
    dispatch(4)


_llint_op_put_getter_setter:
    traceExecution()
    callSlowPath(_llint_slow_path_put_getter_setter)
    dispatch(5)


_llint_op_jtrue:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btinz value, target end,
        _llint_slow_path_jtrue)


_llint_op_jfalse:
    traceExecution()
    jumpTrueOrFalse(
        macro (value, target) btiz value, target end,
        _llint_slow_path_jfalse)


_llint_op_jless:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdlt left, right, target end,
        _llint_slow_path_jless)


_llint_op_jnless:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgtequn left, right, target end,
        _llint_slow_path_jnless)


_llint_op_jgreater:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgt left, right, target end,
        _llint_slow_path_jgreater)


_llint_op_jngreater:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdltequn left, right, target end,
        _llint_slow_path_jngreater)


_llint_op_jlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bilteq left, right, target end,
        macro (left, right, target) bdlteq left, right, target end,
        _llint_slow_path_jlesseq)


_llint_op_jnlesseq:
    traceExecution()
    compare(
        macro (left, right, target) bigt left, right, target end,
        macro (left, right, target) bdgtun left, right, target end,
        _llint_slow_path_jnlesseq)


_llint_op_jgreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bigteq left, right, target end,
        macro (left, right, target) bdgteq left, right, target end,
        _llint_slow_path_jgreatereq)


_llint_op_jngreatereq:
    traceExecution()
    compare(
        macro (left, right, target) bilt left, right, target end,
        macro (left, right, target) bdltun left, right, target end,
        _llint_slow_path_jngreatereq)


_llint_op_loop_hint:
    traceExecution()
    loadp CodeBlock[cfr], t1
    loadp CodeBlock::m_vm[t1], t1
    loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0
    btbnz t0, .handleWatchdogTimer
.afterWatchdogTimerCheck:
    checkSwitchToJITForLoop()
    dispatch(1)
.handleWatchdogTimer:
    callWatchdogTimerHandler(.throwHandler)
    jmp .afterWatchdogTimerCheck
.throwHandler:
    jmp _llint_throw_from_slow_path_trampoline

_llint_op_switch_string:
    traceExecution()
    callSlowPath(_llint_slow_path_switch_string)
    dispatch(0)


_llint_op_new_func_exp:
    traceExecution()
    callSlowPath(_llint_slow_path_new_func_exp)
    dispatch(3)


_llint_op_call:
    traceExecution()
    arrayProfileForCall()
    doCall(_llint_slow_path_call)


_llint_op_construct:
    traceExecution()
    doCall(_llint_slow_path_construct)


_llint_op_call_varargs:
    traceExecution()
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # calleeFrame in t1
    if JSVALUE64
        move t1, sp
    else
        # The calleeFrame is not stack-aligned; move sp down by CallerFrameAndPCSize to align it.
        if ARMv7
            subp t1, CallerFrameAndPCSize, t2
            move t2, sp
        else
            subp t1, CallerFrameAndPCSize, sp
        end
    end
    slowPathForCall(_llint_slow_path_call_varargs)

_llint_op_construct_varargs:
    traceExecution()
    callSlowPath(_llint_slow_path_size_frame_for_varargs)
    branchIfException(_llint_throw_from_slow_path_trampoline)
    # calleeFrame in t1
    if JSVALUE64
        move t1, sp
    else
        # The calleeFrame is not stack-aligned; move sp down by CallerFrameAndPCSize to align it.
        if ARMv7
            subp t1, CallerFrameAndPCSize, t2
            move t2, sp
        else
            subp t1, CallerFrameAndPCSize, sp
        end
    end
    slowPathForCall(_llint_slow_path_construct_varargs)


_llint_op_call_eval:
    traceExecution()

    # Eval is executed in one of two modes:
    #
    # 1) We find that we're really invoking eval() in which case the
    #    execution is performed entirely inside the slow_path, and it
    #    returns the PC of a function that just returns the return value
    #    that the eval returned.
    #
    # 2) We find that we're invoking something called eval() that is not
    #    the real eval. Then the slow_path returns the PC of the thing to
    #    call, and we call it.
    #
    # This allows us to handle two cases, which would require a total of
    # up to four pieces of state that cannot be easily packed into two
    # registers (C functions can easily return values in two registers):
    #
    # - The call frame register. This may or may not have been modified
    #   by the slow_path, but the convention is that it returns it. It's not
    #   totally clear if that's necessary, since the cfr is callee save.
    #   But that's our style in this here interpreter so we stick with it.
    #
    # - A bit to say if the slow_path successfully executed the eval and has
    #   the return value, or did not execute the eval but has a PC for us
    #   to call.
    #
    # - Either:
    #   - The JS return value (two registers), or
    #
    #   - The PC to call.
    #
    # It turns out to be easier to just always have this return the cfr
    # and a PC to call, and that PC may be a dummy thunk that just
    # returns the JS value that the eval returned.

    slowPathForCall(_llint_slow_path_call_eval)


_llint_generic_return_point:
    dispatchAfterCall()


_llint_op_strcat:
    traceExecution()
    callSlowPath(_slow_path_strcat)
    dispatch(4)


_llint_op_get_pnames:
    traceExecution()
    callSlowPath(_llint_slow_path_get_pnames)
    dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.


_llint_op_push_with_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_with_scope)
    dispatch(2)


_llint_op_pop_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_pop_scope)
    dispatch(1)


_llint_op_push_name_scope:
    traceExecution()
    callSlowPath(_llint_slow_path_push_name_scope)
    dispatch(4)


_llint_op_throw:
    traceExecution()
    callSlowPath(_llint_slow_path_throw)
    dispatch(2)


_llint_op_throw_static_error:
    traceExecution()
    callSlowPath(_llint_slow_path_throw_static_error)
    dispatch(3)


_llint_op_profile_will_call:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerWillCallDone
    callSlowPath(_llint_slow_path_profile_will_call)
.opProfilerWillCallDone:
    dispatch(2)


_llint_op_profile_did_call:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadp CodeBlock::m_vm[t0], t0
    loadi VM::m_enabledProfiler[t0], t0
    btpz t0, .opProfilerDidCallDone
    callSlowPath(_llint_slow_path_profile_did_call)
.opProfilerDidCallDone:
    dispatch(2)


_llint_op_debug:
    traceExecution()
    loadp CodeBlock[cfr], t0
    loadi CodeBlock::m_debuggerRequests[t0], t0
    btiz t0, .opDebugDone
    callSlowPath(_llint_slow_path_debug)
.opDebugDone:
    dispatch(3)


_llint_native_call_trampoline:
    nativeCallTrampoline(NativeExecutable::m_function)


_llint_native_construct_trampoline:
    nativeCallTrampoline(NativeExecutable::m_constructor)


# Lastly, make sure that we can link even though we don't support all opcodes.
# These opcodes should never arise when using LLInt or either JIT. We assert
# as much.

macro notSupported()
    if ASSERT_ENABLED
        crash()
    else
        # We should use whatever the smallest possible instruction is, just to
        # ensure that there is a gap between instruction labels. If multiple
        # smallest instructions exist, we should pick the one that is most
        # likely to result in execution being halted. Currently that is the break
        # instruction on all architectures we're interested in. (Break is int3
        # on Intel, which is 1 byte, and bkpt on ARMv7, which is 2 bytes.)
        break
    end
end

_llint_op_init_global_const_nop:
    dispatch(5)
