/*
 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JITInlines_h
#define JITInlines_h


#if ENABLE(JIT)

namespace JSC {

ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
    store32(from, intPayloadFor(entry, callFrameRegister));
#else
    store64(from, addressFor(entry, callFrameRegister));
#endif
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}

#if USE(JSVALUE64)
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load64(Address(from, entry * sizeof(Register)), to);
    killLastResultRegister();
}
#endif

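// Loads the single character of a one-character JSString into dst, appending
// jumps to |failures| for every case the fast path cannot handle: the cell is
// not a string, the string's length is not 1, or its StringImpl has not been
// materialized yet (a rope, indicated by a null value pointer). Note that
// regT1 is clobbered to hold the StringImpl flags, which select between the
// 8-bit and 16-bit character loads.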
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    failures.append(branchTest32(Zero, dst));
    loadPtr(MacroAssembler::Address(dst, StringImpl::flagsOffset()), regT1);
    loadPtr(MacroAssembler::Address(dst, StringImpl::dataOffset()), dst);

    JumpList is16Bit;
    JumpList cont8Bit;
    is16Bit.append(branchTest32(Zero, regT1, TrustedImm32(StringImpl::flagIs8Bit())));
    load8(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.append(jump());
    is16Bit.link(this);
    load16(MacroAssembler::Address(dst, 0), dst);
    cont8Bit.link(this);
}

ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
    return nakedCall;
}

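// Returns true if the current bytecode offset is a jump target. This walks
// the code block's sorted jump-target list with a monotonically advancing
// cursor (m_jumpTargetsPosition), so over a full code-generation pass the
// total cost is linear in the number of jump targets.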
ALWAYS_INLINE bool JIT::atJumpTarget()
{
    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
            return true;
        ++m_jumpTargetsPosition;
    }
    return false;
}

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

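// On assemblers that interleave a constant pool with the instruction stream,
// a pool flush may be inserted between any two instructions. An
// "uninterrupted sequence" reserves enough instruction and constant-pool
// space up front so that no flush can land inside a span of code that will
// later be patched as a unit. In debug builds, begin/end also record the
// sequence bounds so the reserved sizes can be asserted against.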
ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit.
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#elif CPU(SH4)
#ifndef NDEBUG
    insnSpace += sizeof(SH4Word);
    constSpace += sizeof(uint64_t);
#endif

    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif

#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
}

ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
#ifndef NDEBUG
    /* In several cases the uninterrupted sequence is larger than the maximum
     * offset required to patch that same sequence. E.g., if the last
     * macroassembler instruction in an uninterrupted sequence is a stub call,
     * it emits store instruction(s) that should not be counted toward the
     * length of the uninterrupted sequence. So insnSpace and constSpace are
     * upper bounds rather than exact sizes.
     */

#if CPU(SH4)
    if ((dst > 15) || (dst < -16)) {
        insnSpace += 8;
        constSpace += 2;
    }

    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
        insnSpace += 8;
#else
    UNUSED_PARAM(dst);
#endif

    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#else
    UNUSED_PARAM(insnSpace);
    UNUSED_PARAM(constSpace);
    UNUSED_PARAM(dst);
#endif
}

#endif // ASSEMBLER_HAS_CONSTANT_POOL

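// Publishes the current call frame to the VM so that slow paths and the
// garbage collector can walk the stack. The return-site marker stored in the
// tag slot of the ArgumentCount header entry is biased by one (an
// instruction pointer on JSVALUE32_64, a bytecode offset on JSVALUE64),
// apparently so that zero remains distinguishable as "no offset recorded".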
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
    ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
    if (m_bytecodeOffset) {
#if USE(JSVALUE32_64)
        storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
#else
        store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
#endif
    }
    storePtr(callFrameRegister, &m_vm->topCallFrame);
}

ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#elif CPU(SH4)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}

ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}

ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}

ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase()
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Jump emptyJump; // Doing it this way to make Windows happy.
    m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}

ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
{
    return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}

#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, int32_t count)
{
    add64(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
}
#endif

#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
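// x86-64 has no single instruction that stores a 64-bit immediate to a
// 64-bit absolute address, so the sample slot's address is materialized in a
// scratch register (ecx) first and the sample is stored through it.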
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif

ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}

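// Inline object allocation: pops the head of the MarkedAllocator's free list
// into |result|, taking the slow path if the list is empty, then initializes
// the new cell's structure and clears its butterfly (property storage)
// pointer. |scratch| briefly holds the next free-list entry.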
template<typename StructureType>
inline void JIT::emitAllocateJSObject(RegisterID allocator, StructureType structure, RegisterID result, RegisterID scratch)
{
    loadPtr(Address(allocator, MarkedAllocator::offsetOfFreeListHead()), result);
    addSlowCase(branchTestPtr(Zero, result));

    // Remove the object from the free list.
    loadPtr(Address(result), scratch);
    storePtr(scratch, Address(allocator, MarkedAllocator::offsetOfFreeListHead()));

    // Initialize the object's structure.
    storePtr(structure, Address(result, JSCell::structureOffset()));

    // Initialize the object's property storage pointer.
    storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset()));
}

#if ENABLE(VALUE_PROFILER)
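// Records the value currently in regT0 (and its tag in regT1 on
// JSVALUE32_64) into the given ValueProfile. With a single bucket the store
// goes straight to it; otherwise the bucket counter register is advanced by
// a stride of 1 or 3 (picked pseudo-randomly at code-generation time) and
// masked into range, spreading samples across the buckets so one hot value
// cannot monopolize the profile.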
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
    ASSERT(shouldEmitProfiling());
    ASSERT(valueProfile);

    const RegisterID value = regT0;
#if USE(JSVALUE32_64)
    const RegisterID valueTag = regT1;
#endif
    const RegisterID scratch = regT3;

    if (ValueProfile::numberOfBuckets == 1) {
        // We're in a simple configuration: only one bucket, so we can just do a direct
        // store.
#if USE(JSVALUE64)
        store64(value, valueProfile->m_buckets);
#else
        EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
        store32(value, &descriptor->asBits.payload);
        store32(valueTag, &descriptor->asBits.tag);
#endif
        return;
    }

    if (m_randomGenerator.getUint32() & 1)
        add32(TrustedImm32(1), bucketCounterRegister);
    else
        add32(TrustedImm32(3), bucketCounterRegister);
    and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
    move(TrustedImmPtr(valueProfile->m_buckets), scratch);
#if USE(JSVALUE64)
    store64(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
#elif USE(JSVALUE32_64)
    store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
    store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#endif
}

inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset)
{
    if (!shouldEmitProfiling())
        return;
    emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset));
}

inline void JIT::emitValueProfilingSite()
{
    emitValueProfilingSite(m_bytecodeOffset);
}
#endif // ENABLE(VALUE_PROFILER)

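// When profiling is enabled, records the structure last seen at this array
// access; in all cases, replaces the structure pointer in
// |structureAndIndexingType| with the structure's indexing type, which the
// caller dispatches on. One register deliberately serves both roles.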
inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile)
{
    UNUSED_PARAM(scratch); // This scratch register has proved useful here before, so keep the parameter for now.

    RegisterID structure = structureAndIndexingType;
    RegisterID indexingType = structureAndIndexingType;

    if (shouldEmitProfiling())
        storePtr(structure, arrayProfile->addressOfLastSeenStructure());

    load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
}

inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex)
{
#if ENABLE(VALUE_PROFILER)
    emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
#else
    UNUSED_PARAM(bytecodeIndex);
    emitArrayProfilingSite(structureAndIndexingType, scratch, 0);
#endif
}

inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
{
#if ENABLE(VALUE_PROFILER)
    store8(TrustedImm32(1), arrayProfile->addressOfMayStoreToHole());
#else
    UNUSED_PARAM(arrayProfile);
#endif
}

inline void JIT::emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile* arrayProfile)
{
#if ENABLE(VALUE_PROFILER)
    store8(TrustedImm32(1), arrayProfile->addressOfOutOfBounds());
#else
    UNUSED_PARAM(arrayProfile);
#endif
}

static inline bool arrayProfileSaw(ArrayModes arrayModes, IndexingType capability)
{
#if ENABLE(VALUE_PROFILER)
    return arrayModesInclude(arrayModes, capability);
#else
    UNUSED_PARAM(arrayModes);
    UNUSED_PARAM(capability);
    return false;
#endif
}

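// Picks the array access strategy to compile based on the shapes this site
// has observed so far. The checks are ordered from most to least specific:
// double, then int32, then array storage, falling back to contiguous when
// nothing narrower applies (or when profiling is disabled).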
inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
{
#if ENABLE(VALUE_PROFILER)
    profile->computeUpdatedPrediction(m_codeBlock);
    ArrayModes arrayModes = profile->observedArrayModes();
    if (arrayProfileSaw(arrayModes, DoubleShape))
        return JITDouble;
    if (arrayProfileSaw(arrayModes, Int32Shape))
        return JITInt32;
    if (arrayProfileSaw(arrayModes, ArrayStorageShape))
        return JITArrayStorage;
    return JITContiguous;
#else
    UNUSED_PARAM(profile);
    return JITContiguous;
#endif
}

#if USE(JSVALUE32_64)

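// On 32-bit targets a JSValue occupies two machine words: a 32-bit tag
// (Int32Tag, CellTag, BooleanTag, etc., or the high word of a double) and a
// 32-bit payload. The helpers below load and store the two halves
// separately, and cooperate with the map()/unmap() register cache so a
// tag/payload pair that was just computed is not reloaded from the stack.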
inline void JIT::emitLoadTag(int index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}

inline void JIT::emitLoadPayload(int index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}

inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    RELEASE_ASSERT(tag != payload);

    if (base == callFrameRegister) {
        RELEASE_ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // Avoid stomping base.
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}

inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}

inline void JIT::emitStore(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
{
    emitStoreInt32(index, payload, indexIsInt32);
    map(m_bytecodeOffset + opcodeLength, index, tag, payload);
}

inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(int index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(int index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(int index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeOffset)
            return true;
        if (jumpTarget > bytecodeOffset)
            return false;
    }
    return false;
}

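// A one-entry cache recording that the tag and payload of a virtual register
// are currently live in a pair of machine registers, keyed by bytecode
// offset. Nothing is cached across a jump target (isLabeled), since control
// could arrive there without passing through the instruction that populated
// the registers; unmap() invalidates the cache when a cached register is
// clobbered.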
inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeOffset))
        return;

    m_mappedBytecodeOffset = bytecodeOffset;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;

    ASSERT(!canBeOptimizedOrInlined() || m_mappedPayload == regT0);
    ASSERT(!canBeOptimizedOrInlined() || m_mappedTag == regT1);
}

inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeOffset = (unsigned)-1;
    m_mappedVirtualRegisterIndex = JSStack::ReturnPC;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}

inline bool JIT::isMapped(int virtualRegisterIndex)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
    }
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}

#else // USE(JSVALUE32_64)

/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek64(dst, argumentStackOffset);
}

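// The 64-bit port caches the most recently computed result:
// cachedResultRegister holds the value and m_lastResultBytecodeRegister
// remembers which virtual register it belongs to. Setting the index to
// INT_MAX invalidates the cache; any path that clobbers the register, or any
// control-flow merge, must kill it.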
ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}

// Loads a virtual register from the stack frame's register array into a hardware register.
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        if (!value.isNumber())
            move(TrustedImm64(JSValue::encode(value)), dst);
        else
            move(Imm64(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
        // The value we want is already held in cachedResultRegister.
        if (dst != cachedResultRegister)
            move(cachedResultRegister, dst);
        killLastResultRegister();
        return;
    }

    load64(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}

ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}

ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    store64(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}

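// Under the 64-bit NaN-boxed encoding, cell (pointer) values have none of
// the tag bits in tagMaskRegister set, so a single masked test classifies a
// value as a cell.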
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
    return branchTest64(Zero, reg, tagMaskRegister);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    or64(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}

inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantImmediateInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}

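// Boxed int32s have all the bits of TagTypeNumber set, placing them at the
// top of the unsigned 64-bit range, so an unsigned comparison against
// tagTypeNumberRegister distinguishes integers (AboveOrEqual) from
// everything else (Below) in one instruction.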
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
    return branch64(AboveOrEqual, reg, tagTypeNumberRegister);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
    return branch64(Below, reg, tagTypeNumberRegister);
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    and64(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}

ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
    emitFastArithIntToImmNoCheck(src, dest);
}

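// Turns a register holding 0 or 1 into a boxed JS boolean: or-ing in
// ValueFalse maps 0 to the encoding of false and 1 to the encoding of true,
// since ValueTrue is ValueFalse + 1.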
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}

#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JITInlines_h