1/*
2 * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef MacroAssemblerARM64_h
27#define MacroAssemblerARM64_h
28
29#if ENABLE(ASSEMBLER)
30
31#include "ARM64Assembler.h"
32#include "AbstractMacroAssembler.h"
33#include <wtf/MathExtras.h>
34
35namespace JSC {
36
37class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
    // ip0/ip1 are reserved as scratch registers: ip0 ("data temp") for data
    // values, ip1 ("memory temp") for computed addresses; q31 is the FP scratch.
    // All scratch use must go through the getCached*AndInvalidate accessors so
    // the register-caching bookkeeping stays correct.
    static const RegisterID dataTempRegister = ARM64Registers::ip0;
    static const RegisterID memoryTempRegister = ARM64Registers::ip1;
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    // Shorthand for the flag-setting (ADDS/SUBS/...) instruction variants.
    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
    // Masks for the 16-bit halves / upper 32 bits of a 64-bit value.
    static const intptr_t maskHalfWord0 = 0xffffl;
    static const intptr_t maskHalfWord1 = 0xffff0000l;
    static const intptr_t maskUpperWord = 0xffffffff00000000l;

    // 4 instructions - 3 to load the function pointer, + blr.
    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
48
public:
    // Sets up cached-temp-register tracking for the two scratch registers;
    // jumps start out non-patchable until makeJumpPatchable state is flipped.
    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }

    // Re-exported assembler types used by the link-time branch-compaction code.
    typedef ARM64Assembler::LinkRecord LinkRecord;
    typedef ARM64Assembler::JumpType JumpType;
    typedef ARM64Assembler::JumpLinkType JumpLinkType;
    typedef ARM64Assembler::Condition Condition;

    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;
64
    // Thin forwarding wrappers around the assembler's branch-compaction /
    // link-record API; called by the linker, not by code generation itself.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
    static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARM64Assembler::link(record, from, to); }

    // Pointers are 8 bytes on ARM64, so pointer-scaled indexing shifts by 3.
    static const Scale ScalePtr = TimesEight;
74
75    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
76    {
77        // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
78        return !(value & ~0x3ff8);
79    }
80
    // Conditions for integer compares (Above/Below are unsigned,
    // GreaterThan/LessThan are signed).
    enum RelationalCondition {
        Equal = ARM64Assembler::ConditionEQ,
        NotEqual = ARM64Assembler::ConditionNE,
        Above = ARM64Assembler::ConditionHI,
        AboveOrEqual = ARM64Assembler::ConditionHS,
        Below = ARM64Assembler::ConditionLO,
        BelowOrEqual = ARM64Assembler::ConditionLS,
        GreaterThan = ARM64Assembler::ConditionGT,
        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
        LessThan = ARM64Assembler::ConditionLT,
        LessThanOrEqual = ARM64Assembler::ConditionLE
    };

    // Conditions testing the flags produced by a flag-setting ALU operation.
    enum ResultCondition {
        Overflow = ARM64Assembler::ConditionVS,
        Signed = ARM64Assembler::ConditionMI,
        PositiveOrZero = ARM64Assembler::ConditionPL,
        Zero = ARM64Assembler::ConditionEQ,
        NonZero = ARM64Assembler::ConditionNE
    };

    // Conditions for zero/non-zero register tests.
    enum ZeroCondition {
        IsZero = ARM64Assembler::ConditionEQ,
        IsNonZero = ARM64Assembler::ConditionNE
    };

    // Conditions for floating-point compares (after fcmp sets NZCV).
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARM64Assembler::ConditionEQ,
        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARM64Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
        DoubleLessThan = ARM64Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
    };
123
    // ABI-designated special-purpose registers.
    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID framePointerRegister = ARM64Registers::fp;
    static const RegisterID linkRegister = ARM64Registers::lr;
127
128    // FIXME: Get reasonable implementations for these
129    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
130    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
131
132    // Integer operations:
133
    // dest += src (32-bit).
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    // dest += imm (32-bit).
    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }
143
144    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
145    {
146        if (isUInt12(imm.m_value))
147            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
148        else if (isUInt12(-imm.m_value))
149            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
150        else {
151            move(imm, getCachedDataTempRegisterIDAndInvalidate());
152            m_assembler.add<32>(dest, src, dataTempRegister);
153        }
154    }
155
    // *address += imm (32-bit read-modify-write via the data temp register).
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            // Both temps are live here: data temp holds the loaded value,
            // memory temp holds the materialized immediate.
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // *address += imm (32-bit) for an absolute address.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    // dest += *src (32-bit).
    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }
198
    // dest += src (64-bit).
    void add64(RegisterID src, RegisterID dest)
    {
        // When src is sp it must be the first source operand (Rn): the
        // register-register ADD form used otherwise cannot encode sp there.
        if (src == ARM64Registers::sp)
            m_assembler.add<64>(dest, src, dest);
        else
            m_assembler.add<64>(dest, dest, src);
    }

    // dest += imm (64-bit); imm is sign-extended to 64 bits.
    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            // Negated immediate fits the 12-bit field: subtract instead.
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    // dest += imm (full 64-bit immediate).
    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }
238
239    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
240    {
241        if (isUInt12(imm.m_value)) {
242            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
243            return;
244        }
245        if (isUInt12(-imm.m_value)) {
246            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
247            return;
248        }
249
250        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
251        m_assembler.add<64>(dest, src, dataTempRegister);
252    }
253
    // *address += imm (64-bit read-modify-write via the data temp register).
    void add64(TrustedImm32 imm, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            // Immediate goes in the memory temp; data temp holds the value.
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    // *address += imm (64-bit) for an absolute address.
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    // Pointer-width add that never sets flags (add64 uses non-flag-setting forms).
    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    // dest += *src (64-bit).
    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    // dest += *src (64-bit) for an absolute address.
    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }
307
    // dest &= src (32-bit).
    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    // dest = op1 & op2 (32-bit).
    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    // dest &= imm (32-bit).
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    // dest = src & imm (32-bit). Uses the bitmask-immediate encoding when the
    // value is expressible as an ARM64 logical immediate.
    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }
335
336    void and32(Address src, RegisterID dest)
337    {
338        load32(src, dataTempRegister);
339        and32(dataTempRegister, dest);
340    }
341
    // dest &= src (64-bit).
    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    // dest &= imm (64-bit); imm is sign-extended to 64 bits first.
    void and64(TrustedImm32 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    // dest &= imm (pointer-sized immediate).
    void and64(TrustedImmPtr imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }
372
    // dest = number of leading zero bits in the low 32 bits of src.
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    // Logical shift left, 32-bit; immediate shift amounts are masked to 0-31.
    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    // Logical shift left, 64-bit; immediate shift amounts are masked to 0-63.
    void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, shiftAmount);
    }

    void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
    }

    void lshift64(RegisterID shiftAmount, RegisterID dest)
    {
        lshift64(dest, shiftAmount, dest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        lshift64(dest, imm, dest);
    }
417
    // dest *= src (32-bit).
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    // dest *= src (64-bit).
    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<64>(dest, dest, src);
    }

    // dest = src * imm (32-bit). MUL has no immediate form, so the constant
    // is always materialized into the data temp register first.
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    // dest = -dest (32-bit two's-complement negation).
    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    // dest = -dest (64-bit two's-complement negation).
    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }
443
    // dest |= src (32-bit).
    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    // dest = op1 | op2 (32-bit).
    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    // dest |= imm (32-bit).
    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    // dest = src | imm (32-bit); prefers the bitmask-immediate encoding.
    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    // *address |= src (32-bit read-modify-write at an absolute address).
    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    // *address |= imm (32-bit read-modify-write).
    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    // dest |= src (64-bit).
    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    // dest = op1 | op2 (64-bit).
    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    // dest |= imm (64-bit; imm sign-extended to 64 bits).
    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    // dest = src | imm (64-bit; imm sign-extended to 64 bits).
    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    // dest |= imm (full 64-bit immediate).
    void or64(TrustedImm64 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, dest, dataTempRegister);
    }
526
    // srcDst = rotate-right(srcDst, imm) (64-bit); amount masked to 0-63.
    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
    }

    // Arithmetic (sign-propagating) shift right, 32-bit; immediate amounts
    // are masked to 0-31.
    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    // Arithmetic shift right, 64-bit; immediate amounts are masked to 0-63.
    void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, shiftAmount);
    }

    void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
    }

    void rshift64(RegisterID shiftAmount, RegisterID dest)
    {
        rshift64(dest, shiftAmount, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        rshift64(dest, imm, dest);
    }
571
    // dest -= src (32-bit).
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<32>(dest, dest, src);
    }

    // dest -= imm (32-bit); an encodable negated immediate becomes an add.
    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dest, dest, dataTempRegister);
    }

    // *address -= imm (32-bit read-modify-write via the data temp register).
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            // Immediate goes in the memory temp; data temp holds the value.
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // *address -= imm (32-bit) for an absolute address.
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    // dest -= *src (32-bit).
    void sub32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        sub32(dataTempRegister, dest);
    }
634
    // dest -= src (64-bit).
    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<64>(dest, dest, src);
    }

    // dest -= imm (64-bit; imm sign-extended to 64 bits). An encodable
    // negated immediate becomes an add.
    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    // dest -= imm (full 64-bit immediate).
    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }
671
    // Logical (zero-filling) shift right, 32-bit; immediate amounts masked to 0-31.
    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }
691
    // dest ^= src (32-bit).
    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    // dest = op1 ^ op2 (32-bit).
    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<32>(dest, op1, op2);
    }

    // dest ^= imm (32-bit).
    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        xor32(imm, dest, dest);
    }

    // dest = src ^ imm (32-bit). XOR with -1 is a bitwise NOT (mvn);
    // otherwise prefer the bitmask-immediate encoding.
    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<32>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

            if (logicalImm.isValid()) {
                m_assembler.eor<32>(dest, src, logicalImm);
                return;
            }

            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<32>(dest, src, dataTempRegister);
        }
    }

    // *address ^= src (64-bit read-modify-write).
    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }

    // dest ^= src (64-bit).
    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    // dest = op1 ^ op2 (64-bit).
    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    // dest ^= imm (64-bit; imm sign-extended to 64 bits).
    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }

    // dest = src ^ imm (64-bit; imm sign-extended). -1 becomes mvn.
    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<64>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

            if (logicalImm.isValid()) {
                m_assembler.eor<64>(dest, src, logicalImm);
                return;
            }

            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<64>(dest, src, dataTempRegister);
        }
    }
762
763
764    // Memory access operations:
765
    // dest = *(base + offset), 64-bit. Tries an offset-encodable single load;
    // otherwise computes the offset in the memory temp register.
    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // dest = *(base + (index << scale) + offset), 64-bit.
    void load64(BaseIndex address, RegisterID dest)
    {
        // Fast path: no offset, and the scale (0 or 3) is directly encodable
        // in the register-offset load.
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        // Slow path: fold offset + scaled index into the memory temp first.
        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // dest = *address, 64-bit, from an absolute address.
    void load64(const void* address, RegisterID dest)
    {
        load<64>(dest, address);
    }

    // 64-bit load with a later-patchable 32-bit offset; the fixed-width
    // sign-extend sequence keeps the patch site a constant size.
    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    // 64-bit load with a compact patchable offset (must satisfy
    // isCompactPtrAlignedAddressOffset).
    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }
807
    // Crash deliberately, leaving the reason code in the data temp register
    // so it is visible in the crashed register state.
    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), dataTempRegister);
        breakpoint();
    }

    // As above, additionally stashing an auxiliary value in the memory temp.
    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm64(misc), memoryTempRegister);
        abortWithReason(reason);
    }

    // Single-instruction 64-bit load whose instruction can be converted later;
    // the offset must fit the scaled unsigned-immediate form (<= 0xff8, 8-byte aligned).
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }
827
    // dest = *(base + offset), 32-bit.
    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    // dest = *(base + (index << scale) + offset), 32-bit.
    void load32(BaseIndex address, RegisterID dest)
    {
        // Fast path: scale 0 or 2 encodes directly in the register-offset load.
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    // dest = *address, 32-bit, from an absolute address.
    void load32(const void* address, RegisterID dest)
    {
        load<32>(dest, address);
    }

    // 32-bit load with a later-patchable fixed-width 32-bit offset.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    // 32-bit load with a compact patchable offset.
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    // ARM64 loads tolerate unaligned addresses, so this is a plain load32.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
874
    // dest = zero-extended halfword at (base + offset).
    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    // dest = zero-extended halfword at (base + (index << scale) + offset).
    void load16(BaseIndex address, RegisterID dest)
    {
        // Fast path: scale 0 or 1 encodes directly in the register-offset load.
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    // ARM64 halfword loads tolerate unaligned addresses, so this is plain load16.
    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    // dest = halfword sign-extended to 64 bits (ldrsh<64>).
    void load16Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
    }
912
    // Loads a zero-extended 8-bit value; falls back to a register-offset form
    // when the immediate offset does not encode.
    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    // Base+index 8-bit load. ldrb's register-offset form only supports a
    // shift of 0, so the fast path requires scale == 0.
    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    // Absolute-address 8-bit load. The address is cached in memoryTempRegister;
    // if dest aliases it, the cache must be invalidated since the load clobbers it.
    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
        if (dest == memoryTempRegister)
            m_cachedMemoryTempRegister.invalidate();
    }

    // Loads an 8-bit value sign-extended to the full 64-bit register.
    void load8Signed(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
    }
953
    // Stores a 64-bit register; falls back to a register-offset form when the
    // immediate offset does not encode.
    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    // Base+index 64-bit store. The register-offset form only supports a shift
    // of 0 or 3 (the access size) for doubleword accesses.
    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    // Stores a 64-bit register to an absolute address.
    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }

    // Stores a 64-bit immediate; zero is stored directly from the zero
    // register, avoiding a materialization.
    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    // Same as above for base+index addressing.
    void store64(TrustedImm64 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    // Patchable 64-bit store: fixed-width offset materialization so the
    // 32-bit offset at the returned DataLabel32 can be rewritten later.
    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }
1009
    // Stores a 32-bit register; falls back to a register-offset form when the
    // immediate offset does not encode.
    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    // Base+index 32-bit store. The register-offset form only supports a shift
    // of 0 or 2 (the access size) for word accesses.
    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    // Stores a 32-bit register to an absolute address.
    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }

    // Stores a 32-bit immediate; zero is stored directly from the zero
    // register, avoiding a materialization.
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    // Same as above for base+index addressing.
    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    // Same as above for an absolute address.
    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    // Patchable 32-bit store: fixed-width offset materialization so the
    // 32-bit offset at the returned DataLabel32 can be rewritten later.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }
1076
    // Base+index 16-bit store. strh's register-offset form only supports a
    // shift of 0 or 1, hence the scale check on the fast path.
    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    // Base+index 8-bit store. strb's register-offset form only supports a
    // shift of 0, so the fast path requires scale == 0.
    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    // Stores an 8-bit register to an absolute address via memoryTempRegister.
    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    // Stores an 8-bit register; falls back to a register-offset form when the
    // immediate offset does not encode.
    void store8(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<8>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<8>(src, address.base, memoryTempRegister);
    }

    // Stores an 8-bit immediate to an absolute address; zero is stored
    // directly from the zero register.
    void store8(TrustedImm32 imm, void* address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    // Same as above for base+immediate-offset addressing.
    void store8(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }
1137
    // Floating-point operations:

    // ARM64 always has full hardware floating-point support.
    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

    // dest = |src| (double precision).
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    // dest += src.
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    // dest = op1 + op2.
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    // dest += [src], using the FP temp register for the loaded operand.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest += *address.m_ptr.
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest = round src toward +infinity (frintp).
    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    // dest = round src toward -infinity (frintm).
    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }

    // Convert 'src' to an integer, placing the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }
1200
    // Compares left/right and returns a branch taken when 'cond' holds.
    // The NotEqual and EqualOrUnordered conditions need extra care because
    // the underlying NE flag condition is also true for unordered (NaN) inputs.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump result = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }

    // Branch taken when reg is a non-zero, ordered (non-NaN) double.
    // The VS (unordered) case is filtered out before testing NE.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump result = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    // Branch taken when reg compares equal to zero or is NaN.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }

    // Truncates src to an integer in dest; branches on success or failure
    // depending on branchType (failure = result did not fit in 32 bits).
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check the low 32-bits sign extend to be equal to the full value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }
1254
    // Narrows a double to single precision.
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    // Widens a single-precision value to double precision.
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }

    // Converts a 32-bit immediate to a double, materializing it in the data
    // temp register first.
    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    // Signed 32-bit integer -> double (scvtf).
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    // Loads a 32-bit integer from memory and converts it to double.
    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    // Same as above for an absolute address.
    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    // Signed 64-bit integer -> double (scvtf).
    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 64>(dest, src);
    }

    // dest /= src.
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    // dest = op1 / op2.
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }
1302
    // Loads a double; falls back to a register-offset form when the immediate
    // offset does not encode.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // Base+index double load. The register-offset form only supports a shift
    // of 0 or 3 (the access size) for doubleword accesses.
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    // Loads a double from an absolute address cached in memoryTempRegister.
    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        moveToCachedReg(address, m_cachedMemoryTempRegister);
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }

    // Base+index single-precision load; valid register-offset shifts are 0 or 2.
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }
1341
    // FP register -> FP register copy.
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    // Bitwise copy of a double's 64-bit pattern into a general register.
    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    // Bitwise copy of a general register's 64 bits into an FP register.
    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    // dest *= src.
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        mulDouble(dest, src, dest);
    }

    // dest = op1 * op2.
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fmul<64>(dest, op1, op2);
    }

    // dest *= [src], using the FP temp register for the loaded operand.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    // dest = -src.
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fneg<64>(dest, src);
    }

    // dest = sqrt(src).
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fsqrt<64>(dest, src);
    }
1382
    // Stores a double; falls back to a register-offset form when the
    // immediate offset does not encode.
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    // Stores a double to an absolute address cached in memoryTempRegister.
    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        moveToCachedReg(address, m_cachedMemoryTempRegister);
        m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
    }

    // Base+index double store; valid register-offset shifts are 0 or 3.
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    // Base+index single-precision store; valid register-offset shifts are 0 or 2.
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    // dest -= src.
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        subDouble(dest, src, dest);
    }

    // dest = op1 - op2.
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fsub<64>(dest, op1, op2);
    }

    // dest -= [src], using the FP temp register for the loaded operand.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // Result is undefined if the value is outside of the integer range.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzs<32, 64>(dest, src);
    }

    // Unsigned truncation; result is undefined outside the uint32 range.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fcvtzu<32, 64>(dest, src);
    }
1448
1449
    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. These operations are not supported on
    // ARM64. Peek and poke operations read or write values on the
    // stack, without moving the current stack position. Additionally,
    // there are popToRestore and pushToSave operations, which are
    // designed just for quick-and-dirty saving and restoring of
    // temporary values. These operations don't claim to have any
    // ABI compatibility.

    // Single-register push/pop are unsupported on ARM64 (SP must stay
    // 16-byte aligned); use pushPair/popPair or pushToSave/popToRestore.
    void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(RegisterID) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(Address) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
    {
        CRASH();
    }

    // Pops two 64-bit registers, advancing SP by 16 (post-indexed ldp).
    void popPair(RegisterID dest1, RegisterID dest2)
    {
        m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
    }

    // Pushes two 64-bit registers, decrementing SP by 16 (pre-indexed stp).
    void pushPair(RegisterID src1, RegisterID src2)
    {
        m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(16));
    }

    // Restores one register from a 16-byte pushToSave slot.
    void popToRestore(RegisterID dest)
    {
        m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
    }

    // Saves one register in a 16-byte slot, keeping SP 16-byte aligned.
    void pushToSave(RegisterID src)
    {
        m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
    }

    // Pushes an immediate without perturbing any register's final value:
    // the temp register is saved twice, overwritten with the immediate at
    // [sp], then restored from its second saved copy at [sp, 8].
    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        RegisterID reg = dataTempRegister;
        pushPair(reg, reg);
        move(imm, reg);
        store64(reg, stackPointerRegister);
        load64(Address(stackPointerRegister, 8), reg);
    }

    // Pushes the 32-bit value loaded from 'address'.
    void pushToSave(Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    // Pushes a 32-bit immediate (clobbers the data temp register).
    void pushToSave(TrustedImm32 imm)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        pushToSave(dataTempRegister);
    }

    // FP variants keep the same 16-byte slot size as the integer ones.
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        add64(TrustedImm32(16), stackPointerRegister);
    }

    void pushToSave(FPRegisterID src)
    {
        sub64(TrustedImm32(16), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }

    static ptrdiff_t pushToSaveByteOffset() { return 16; }
1537
    // Register move operations:

    // Full 64-bit register copy; no-op when src == dest.
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov<64>(dest, src);
    }

    // Materializes a 32-bit immediate in dest.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        moveInternal<TrustedImm32, int32_t>(imm, dest);
    }

    // Materializes a pointer-sized immediate in dest.
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
    }

    // Materializes a 64-bit immediate in dest.
    void move(TrustedImm64 imm, RegisterID dest)
    {
        moveInternal<TrustedImm64, int64_t>(imm, dest);
    }

    // Exchanges two registers via the data temp register.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, getCachedDataTempRegisterIDAndInvalidate());
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }

    // Sign-extends the low 32 bits of src into the full 64-bit dest.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtw(dest, src);
    }

    // Zero-extends the low 32 bits of src into the full 64-bit dest.
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtw(dest, src);
    }
1577
1578
    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may linked at a later point, allow forwards jump,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.

    // 32-bit register/register compare-and-branch.
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<32>(left, right);
        return Jump(makeBranch(cond));
    }

    // Register/immediate compare: prefers the 12-bit immediate forms of
    // cmp/cmn; otherwise materializes the immediate in the data temp register.
    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (isUInt12(right.m_value))
            m_assembler.cmp<32>(left, UInt12(right.m_value));
        else if (isUInt12(-right.m_value))
            m_assembler.cmn<32>(left, UInt12(-right.m_value));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<32>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // The memory-operand variants load the operand into a temp register and
    // delegate to the register forms above.
    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, left, memoryTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    // Uses the data temp here: the register/register overload it delegates to
    // does not need the data temp, unlike the immediate overload below.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
1651
    // 64-bit register/register compare-and-branch.
    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp<64>(left, right);
        return Jump(makeBranch(cond));
    }

    // Register/immediate compare: prefers the 12-bit immediate forms of
    // cmp/cmn; otherwise materializes the immediate in the data temp register.
    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
    {
        intptr_t immediate = right.m_value;
        if (isUInt12(immediate))
            m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
        else if (isUInt12(-immediate))
            m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
        else {
            moveToCachedReg(right, m_dataMemoryTempRegister);
            m_assembler.cmp<64>(left, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // The memory-operand variants load the operand into a temp register and
    // delegate to the register forms above.
    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
    {
        load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, left, memoryTempRegister);
    }

    // Data temp is used here because the register/register overload it
    // delegates to does not itself need the data temp.
    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, dataTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    // Pointer compare against a base+index memory operand.
    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch64(cond, memoryTempRegister, right);
    }

    // 8-bit compare-and-branch variants: load the zero-extended byte and
    // compare as 32-bit. The immediate must itself fit in a byte (asserted).
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
        return branch32(cond, memoryTempRegister, right);
    }
1722
    // Branch on the flags produced by a 32-bit tst of reg against mask.
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst<32>(reg, mask);
        return Jump(makeBranch(cond));
    }
1728
1729    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
1730    {
1731        if (mask.m_value == -1)
1732            m_assembler.tst<32>(reg, reg);
1733        else {
1734            bool testedWithImmediate = false;
1735            if ((cond == Zero) || (cond == NonZero)) {
1736                LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
1737
1738                if (logicalImm.isValid()) {
1739                    m_assembler.tst<32>(reg, logicalImm);
1740                    testedWithImmediate = true;
1741                }
1742            }
1743            if (!testedWithImmediate) {
1744                move(mask, getCachedDataTempRegisterIDAndInvalidate());
1745                m_assembler.tst<32>(reg, dataTempRegister);
1746            }
1747        }
1748    }
1749
    // Branch on the current condition flags (e.g. after an explicit test32/test64).
    Jump branch(ResultCondition cond)
    {
        return Jump(makeBranch(cond));
    }
1754
    // Branch on a 32-bit test of reg against an immediate mask, choosing the
    // cheapest ARM64 encoding available:
    //   - whole-register Zero/NonZero -> CBZ/CBNZ (no flags needed);
    //   - single-bit Zero/NonZero    -> TBZ/TBNZ;
    //   - mask encodable as a logical immediate -> TST imm;
    //   - otherwise materialize the mask in the data temp register.
    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<32>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            if ((cond == Zero) || (cond == NonZero)) {
                LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<32>(reg, logicalImm);
                    return Jump(makeBranch(cond));
                }
            }

            move(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
1778
    // Branch on a 32-bit test of the value at an address against an immediate mask.
    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }
1784
    // Branch on a 32-bit test of the value at a base+index address against a mask.
    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
        return branchTest32(cond, memoryTempRegister, mask);
    }
1790
    // Branch on the flags produced by a 64-bit AND of reg with mask (TST).
    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst<64>(reg, mask);
        return Jump(makeBranch(cond));
    }
1796
    // Branch on a 64-bit test of reg against a (sign-extended) 32-bit immediate
    // mask, using the same encoding preference order as the 32-bit variant:
    // CBZ/CBNZ, TBZ/TBNZ, logical-immediate TST, then temp-register fallback.
    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1) {
            if ((cond == Zero) || (cond == NonZero))
                return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
            m_assembler.tst<64>(reg, reg);
        } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
            return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
        else {
            if ((cond == Zero) || (cond == NonZero)) {
                // The int32 mask sign-extends when converted to uint64_t here,
                // matching the signExtend32ToPtr fallback below.
                LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);

                if (logicalImm.isValid()) {
                    m_assembler.tst<64>(reg, logicalImm);
                    return Jump(makeBranch(cond));
                }
            }

            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(reg, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }
1820
    // Branch on a 64-bit test of the value at an address against a register mask.
    Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }
1826
    // Branch on a 64-bit test of the value at an address against an immediate mask.
    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }
1832
    // Branch on a 64-bit test of the value at a base+index address against a mask.
    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }
1838
    // Branch on a 64-bit test of the value at an absolute address against a mask.
    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest64(cond, dataTempRegister, mask);
    }
1844
    // Branch on a test of the byte at an address against an immediate mask.
    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask);
    }
1850
    // Branch on a test of the byte at an absolute address against an immediate mask.
    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask);
    }
1856
    // Branch on a test of the byte at base + (large) offset. The offset may not
    // fit a load/store immediate, so it is materialized in the data temp register
    // and used as the LDRB index register.
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
        return branchTest32(cond, dataTempRegister, mask);
    }
1863
    // Branch on a test of the byte at a base+index address against an immediate mask.
    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, getCachedDataTempRegisterIDAndInvalidate());
        return branchTest32(cond, dataTempRegister, mask);
    }
1869
    // ARM64 loads tolerate unaligned addresses, so this is just branch32.
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }
1874
1875
1876    // Arithmetic control flow operations:
1877    //
1878    // This set of conditional branch operations branch based
1879    // on the result of an arithmetic operation. The operation
1880    // is performed as normal, storing the result.
1881    //
1882    // * jz operations branch if the result is zero.
1883    // * jo operations branch if the (signed) arithmetic
1884    //   operation caused an overflow to occur.
1885
    // dest = op1 + op2 (32-bit, flag-setting ADDS), then branch on cond.
    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
1891
    // dest = op1 + imm (32-bit, flag-setting), then branch on cond. Small
    // immediates use ADDS/SUBS directly; others go via the data temp register.
    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        // A negative immediate whose negation fits UInt12 becomes a SUBS.
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd32(cond, op1, dataTempRegister, dest);
    }
1906
    // dest += *src (32-bit, flag-setting), then branch on cond.
    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        return branchAdd32(cond, dest, dataTempRegister, dest);
    }
1912
    // dest += src (32-bit, flag-setting), then branch on cond.
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, dest, src, dest);
    }
1917
    // dest += imm (32-bit, flag-setting), then branch on cond.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd32(cond, dest, imm, dest);
    }
1922
    // *address += imm (32-bit, flag-setting), storing the result back, then
    // branch on cond. The stores do not emit flag-setting instructions, so the
    // condition from the ADDS/SUBS survives to the branch.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
        } else {
            // Large immediate: materialize it in the memory temp register.
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
            store32(dataTempRegister, address.m_ptr);
        }

        return Jump(makeBranch(cond));
    }
1941
    // dest = op1 + op2 (64-bit, flag-setting ADDS), then branch on cond.
    Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
1947
1948    Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
1949    {
1950        if (isUInt12(imm.m_value)) {
1951            m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
1952            return Jump(makeBranch(cond));
1953        }
1954        if (isUInt12(-imm.m_value)) {
1955            m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
1956            return Jump(makeBranch(cond));
1957        }
1958
1959        move(imm, getCachedDataTempRegisterIDAndInvalidate());
1960        return branchAdd64(cond, op1, dataTempRegister, dest);
1961    }
1962
    // dest += src (64-bit, flag-setting), then branch on cond.
    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, dest, src, dest);
    }
1967
    // dest += imm (64-bit, flag-setting), then branch on cond.
    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, dest, imm, dest);
    }
1972
    // dest = src1 * src2 (32-bit), then branch on cond. ARM64 multiplies do not
    // set flags, so Overflow is detected by doing the multiply in 64 bits and
    // checking whether the high half is a sign-extension of the low half.
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        if (cond != Overflow) {
            m_assembler.mul<32>(dest, src1, src2);
            return branchTest32(cond, dest);
        }

        // This is a signed multiply of two 32-bit values, producing a 64-bit result.
        m_assembler.smull(dest, src1, src2);
        // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
        m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
        // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
        m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
        // After a mul32 the top 32 bits of the register should be clear.
        zeroExtend32ToPtr(dest, dest);
        // Check that bits 31..63 of the original result were all equal.
        return branch32(NotEqual, memoryTempRegister, dataTempRegister);
    }
1993
    // dest *= src (32-bit), then branch on cond.
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, dest, src, dest);
    }
1998
    // dest = imm * src (32-bit), then branch on cond; the immediate is
    // materialized in the data temp register first.
    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchMul32(cond, dataTempRegister, src, dest);
    }
2004
    // dest = src1 * src2 (64-bit), then branch on cond. Overflow is detected by
    // comparing the high 64 bits of the 128-bit product (SMULH) against the
    // sign-extension of the low 64 bits.
    Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT(cond != Signed);

        // This is a signed multiply of two 64-bit values, producing a 64-bit result.
        m_assembler.mul<64>(dest, src1, src2);

        if (cond != Overflow)
            return branchTest64(cond, dest);

        // Compute bits 127..64 of the result into dataTempRegister.
        m_assembler.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1, src2);
        // Splat bit 63 of the result to bits 63..0 of memoryTempRegister.
        m_assembler.asr<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 63);
        // Check that bits 127..64 of the original result all equal bit 63.
        return branch64(NotEqual, memoryTempRegister, dataTempRegister);
    }
2022
    // dest *= src (64-bit), then branch on cond.
    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul64(cond, dest, src, dest);
    }
2027
    // dest = -dest (32-bit, flag-setting NEGS), then branch on cond.
    Jump branchNeg32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }
2033
    // srcDest = -srcDest (64-bit, flag-setting NEGS), then branch on cond.
    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
    {
        m_assembler.neg<64, S>(srcDest, srcDest);
        return Jump(makeBranch(cond));
    }
2039
    // One-operand form: dest = -dest (flag-setting), then branch on cond.
    // NOTE(review): despite the name this emits a negate, i.e. it appears to be
    // the legacy "0 - dest" form (same emission as branchNeg32) — confirm callers.
    Jump branchSub32(ResultCondition cond, RegisterID dest)
    {
        m_assembler.neg<32, S>(dest, dest);
        return Jump(makeBranch(cond));
    }
2045
    // dest = op1 - op2 (32-bit, flag-setting SUBS), then branch on cond.
    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<32, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
2051
    // dest = op1 - imm (32-bit, flag-setting), then branch on cond. Small
    // immediates use SUBS/ADDS directly; others go via the data temp register.
    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
            return Jump(makeBranch(cond));
        }
        // Subtracting a negative immediate whose negation fits UInt12 is an ADDS.
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
            return Jump(makeBranch(cond));
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        return branchSub32(cond, op1, dataTempRegister, dest);
    }
2066
    // dest -= src (32-bit, flag-setting), then branch on cond.
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, dest, src, dest);
    }
2071
    // dest -= imm (32-bit, flag-setting), then branch on cond.
    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, dest, imm, dest);
    }
2076
    // dest = op1 - op2 (64-bit, flag-setting SUBS), then branch on cond.
    Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub<64, S>(dest, op1, op2);
        return Jump(makeBranch(cond));
    }
2082
2083    Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2084    {
2085        if (isUInt12(imm.m_value)) {
2086            m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
2087            return Jump(makeBranch(cond));
2088        }
2089        if (isUInt12(-imm.m_value)) {
2090            m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
2091            return Jump(makeBranch(cond));
2092        }
2093
2094        move(imm, getCachedDataTempRegisterIDAndInvalidate());
2095        return branchSub64(cond, op1, dataTempRegister, dest);
2096    }
2097
    // dest -= src (64-bit, flag-setting), then branch on cond.
    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, dest, src, dest);
    }
2102
    // dest -= imm (64-bit, flag-setting), then branch on cond.
    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, dest, imm, dest);
    }
2107
2108
2109    // Jumps, calls, returns
2110
    // Emit a patchable call: a fixed-width move of a placeholder pointer into the
    // data temp register followed by BLR. The linker later rewrites the pointer;
    // the assert pins the expected distance between the pointer and the call.
    ALWAYS_INLINE Call call()
    {
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        invalidateAllTempRegisters();
        m_assembler.blr(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }
2121
    // Indirect call through a register. The callee may clobber the temp
    // registers, so all cached temp values are invalidated first.
    ALWAYS_INLINE Call call(RegisterID target)
    {
        invalidateAllTempRegisters();
        m_assembler.blr(target);
        return Call(m_assembler.label(), Call::None);
    }
2128
    // Indirect call through a function pointer loaded from memory.
    ALWAYS_INLINE Call call(Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        return call(dataTempRegister);
    }
2134
    // Unconditional jump; emitted fixed-size when a patchable jump is being built.
    ALWAYS_INLINE Jump jump()
    {
        AssemblerLabel label = m_assembler.label();
        m_assembler.b();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
    }
2141
    // Indirect jump through a register (BR).
    void jump(RegisterID target)
    {
        m_assembler.br(target);
    }
2146
    // Indirect jump through a pointer loaded from memory.
    void jump(Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
    }
2152
    // Indirect jump through a pointer loaded from an absolute address.
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
        load64(Address(dataTempRegister), dataTempRegister);
        m_assembler.br(dataTempRegister);
    }
2159
    // Convert a previously-emitted jump into a tail call by linking it here and
    // emitting the tail-call sequence.
    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }
2165
    // Direct PC-relative call (BL), linked later; limited to BL's branch range.
    ALWAYS_INLINE Call nearCall()
    {
        m_assembler.bl();
        return Call(m_assembler.label(), Call::LinkableNear);
    }
2171
    // Return from the current function (RET).
    ALWAYS_INLINE void ret()
    {
        m_assembler.ret();
    }
2176
    // Patchable tail call: same sequence as call() but ending in BR instead of
    // BLR, so no return address is pushed.
    ALWAYS_INLINE Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        AssemblerLabel pointerLabel = m_assembler.label();
        moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.br(dataTempRegister);
        AssemblerLabel callLabel = m_assembler.label();
        ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
        return Call(callLabel, Call::Linkable);
    }
2187
2188
2189    // Comparisons operations
2190
    // dest = (left <cond> right) ? 1 : 0 for 32-bit operands (CMP + CSET).
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<32>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2196
    // dest = (*left <cond> right) ? 1 : 0 for 32-bit operands.
    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(dataTempRegister, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2203
    // dest = (left <cond> imm) ? 1 : 0 for 32-bit operands; the immediate is
    // materialized in the data temp register.
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        move(right, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<32>(left, dataTempRegister);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2210
    // dest = (left <cond> right) ? 1 : 0 for 64-bit operands (CMP + CSET).
    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp<64>(left, right);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2216
    // dest = (left <cond> imm) ? 1 : 0 for 64-bit operands; the 32-bit immediate
    // is sign-extended into the data temp register before the compare.
    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.cmp<64>(left, dataTempRegister);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2223
    // dest = (byte at left <cond> imm) ? 1 : 0; both operands are widened to
    // the temp registers and compared as 32-bit values.
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
        move(right, getCachedDataTempRegisterIDAndInvalidate());
        compare32(cond, memoryTempRegister, dataTempRegister, dest);
    }
2230
    // dest = (src AND mask satisfies cond) ? 1 : 0 for 32-bit operands (TST + CSET).
    void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.tst<32>(src, src);
        else {
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<32>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2241
    // dest = (*address AND mask satisfies cond) ? 1 : 0 for 32-bit operands.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        test32(cond, dataTempRegister, mask, dest);
    }
2247
    // dest = (byte at address AND mask satisfies cond) ? 1 : 0.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, getCachedDataTempRegisterIDAndInvalidate());
        test32(cond, dataTempRegister, mask, dest);
    }
2253
    // dest = (op1 AND op2 satisfies cond) ? 1 : 0 for 64-bit operands (TST + CSET).
    void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.tst<64>(op1, op2);
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2259
    // dest = (src AND sign-extended mask satisfies cond) ? 1 : 0 for 64-bit operands.
    void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.tst<64>(src, src);
        else {
            signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.tst<64>(src, dataTempRegister);
        }
        m_assembler.cset<32>(dest, ARM64Condition(cond));
    }
2270
2271
2272    // Patchable operations
2273
    // Move a 32-bit immediate using a fixed-width sequence so the value can be
    // repatched in place later; returns a label addressing that sequence.
    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
    {
        DataLabel32 label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }
2280
    // Move a pointer immediate using a fixed-width sequence so the value can be
    // repatched in place later; returns a label addressing that sequence.
    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(imm, dest);
        return label;
    }
2287
    // Compare left against a patchable pointer constant and branch. dataLabel
    // receives the location of the constant for later repatching.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }
2294
    // Compare the value at left against a patchable pointer constant and branch.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = DataLabelPtr(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch64(cond, left, dataTempRegister);
    }
2301
    // Compare the 32-bit value at left against a patchable constant and branch.
    ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        dataLabel = DataLabel32(this);
        moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
        return branch32(cond, left, dataTempRegister);
    }
2308
2309    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
2310    {
2311        m_makeJumpPatchable = true;
2312        Jump result = branch32(cond, left, TrustedImm32(right));
2313        m_makeJumpPatchable = false;
2314        return PatchableJump(result);
2315    }
2316
    // branchTest32 emitted with fixed-size jump encoding so it can be repatched.
    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        m_makeJumpPatchable = true;
        Jump result = branchTest32(cond, reg, mask);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
2324
    // branch32 emitted with fixed-size jump encoding so it can be repatched.
    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        m_makeJumpPatchable = true;
        Jump result = branch32(cond, reg, imm);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
2332
    // branchPtrWithPatch emitted with fixed-size jump encoding; both the pointer
    // constant and the jump itself are repatchable.
    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
2340
    // branch32WithPatch emitted with fixed-size jump encoding; both the constant
    // and the jump itself are repatchable.
    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        m_makeJumpPatchable = true;
        Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
2348
    // Unconditional jump emitted fixed-size so it can be repatched later.
    PatchableJump patchableJump()
    {
        m_makeJumpPatchable = true;
        Jump result = jump();
        m_makeJumpPatchable = false;
        return PatchableJump(result);
    }
2356
    // Store a repatchable pointer constant to memory; the fixed-width move lets
    // the constant be rewritten in place later via the returned label.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label(this);
        moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
        store64(dataTempRegister, address);
        return label;
    }
2364
    // Store a repatchable pointer constant, initially null.
    ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }
2369
    // Rewrite the fixed-width move at `address` to load `value` into the data
    // temp register (used when reverting a jump replacement).
    static void reemitInitialMoveWithPatch(void* address, void* value)
    {
        ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
    }
2374
2375    // Miscellaneous operations:
2376
    // Emit a BRK instruction with the given immediate (software breakpoint).
    void breakpoint(uint16_t imm = 0)
    {
        m_assembler.brk(imm);
    }
2381
    // Emit a NOP.
    void nop()
    {
        m_assembler.nop();
    }
2386
    // Full-system data memory barrier (DMB SY).
    void memoryFence()
    {
        m_assembler.dmbSY();
    }
2391
2392
2393    // Misc helper functions.
2394
2395    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // RelationalCondition values map directly onto ARM64 condition codes.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
    }
2400
    // Decode the target pointer of a previously-emitted patchable call.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
    }
2405
    // Overwrite the instruction(s) at instructionStart with a jump to destination.
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }
2410
    // Maximum byte size of the code emitted by replaceWithJump.
    static ptrdiff_t maxJumpReplacementSize()
    {
        return ARM64Assembler::maxJumpReplacementSize();
    }
2415
    // This port reserves both temp registers for the assembler itself, so there
    // is no spare scratch register for value blinding; callers must not reach here.
    RegisterID scratchRegisterForBlinding()
    {
        // We *do not* have a scratch register for blinding.
        RELEASE_ASSERT_NOT_REACHED();
        return getCachedDataTempRegisterIDAndInvalidate();
    }
2422
    // Jump-replacing a patchable branchPtrWithPatch is not supported on ARM64.
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
    // Jump-replacing a patchable branch32WithPatch is not supported on ARM64.
    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
2425
    // The replaceable sequence starts at the data label itself (offset 0).
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }
2430
    // Unsupported on this port (see canJumpReplacePatchableBranchPtrWithPatch).
    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }
2436
    // Unsupported on this port (see canJumpReplacePatchableBranch32WithPatch).
    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }
2442
    // Undo a jump replacement by re-emitting the original patchable move.
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
    }
2447
    // Unsupported on this port (see canJumpReplacePatchableBranchPtrWithPatch).
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
2452
    // Unsupported on this port (see canJumpReplacePatchableBranch32WithPatch).
    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
2457
2458protected:
    // Emit a conditional branch followed by a NOP slot; the Jump records the
    // label of the NOP so linking/patching can use either encoding.
    ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
    {
        m_assembler.b_cond(cond);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
    }
    // Convenience overload: convert a RelationalCondition to an ARM64 condition.
    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
    // Convenience overload: convert a ResultCondition to an ARM64 condition.
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
    // Convenience overload: convert a DoubleCondition to an ARM64 condition.
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
2469
    // Emit CBZ/CBNZ on reg plus a NOP slot (see makeBranch for why the NOP is
    // emitted). dataSize selects the 32- or 64-bit form.
    template <int dataSize>
    ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
    {
        if (cond == IsZero)
            m_assembler.cbz<dataSize>(reg);
        else
            m_assembler.cbnz<dataSize>(reg);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
    }
2481
    // Emit TBZ/TBNZ on a single bit of reg plus a NOP slot. The mask keeps the
    // bit index in TBZ/TBNZ's 0..63 encoding range even in release builds.
    ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
    {
        ASSERT(bit < 64);
        bit &= 0x3f;
        if (cond == IsZero)
            m_assembler.tbz(reg, bit);
        else
            m_assembler.tbnz(reg, bit);
        AssemblerLabel label = m_assembler.label();
        m_assembler.nop();
        return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
    }
2494
    // RelationalCondition values are defined to match ARM64 condition codes.
    ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }
2499
    // ResultCondition values are defined to match ARM64 condition codes.
    ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }
2504
    // DoubleCondition values are defined to match ARM64 condition codes.
    ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
    {
        return static_cast<ARM64Assembler::Condition>(cond);
    }
2509
2510private:
2511    ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
    // Returns the memory temp register ID after invalidating its cached value.
    ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
2513
2514    ALWAYS_INLINE bool isInIntRange(intptr_t value)
2515    {
2516        return value == ((value << 32) >> 32);
2517    }
2518
    // Materializes an immediate into dest using the shortest MOVZ/MOVN/MOVK
    // sequence, or a single logical-immediate move when the bit pattern
    // permits. ImmediateType is a TrustedImm* wrapper; rawType is the matching
    // integer type and fixes the operation width (32 or 64 bits).
    template<typename ImmediateType, typename rawType>
    void moveInternal(ImmediateType imm, RegisterID dest)
    {
        const int dataSize = sizeof(rawType) * 8;
        const int numberHalfWords = dataSize / 16;
        rawType value = bitwise_cast<rawType>(imm.m_value);
        uint16_t halfword[numberHalfWords];

        // Handle 0 and ~0 here to simplify code below
        if (!value) {
            m_assembler.movz<dataSize>(dest, 0);
            return;
        }
        if (!~value) {
            m_assembler.movn<dataSize>(dest, 0);
            return;
        }

        LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));

        if (logicalImm.isValid()) {
            m_assembler.movi<dataSize>(dest, logicalImm);
            return;
        }

        // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
        int zeroOrNegateVote = 0;
        for (int i = 0; i < numberHalfWords; ++i) {
            halfword[i] = getHalfword(value, i);
            if (!halfword[i])
                zeroOrNegateVote++;
            else if (halfword[i] == 0xffff)
                zeroOrNegateVote--;
        }

        bool needToClearRegister = true;
        if (zeroOrNegateVote >= 0) {
            // Mostly-zero value: start from MOVZ (which zeroes the rest of the
            // register) and patch in the remaining non-zero halfwords with MOVK.
            for (int i = 0; i < numberHalfWords; i++) {
                if (halfword[i]) {
                    if (needToClearRegister) {
                        m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
                        needToClearRegister = false;
                    } else
                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
                }
            }
        } else {
            // Mostly-ones value: start from MOVN (which sets the rest of the
            // register to all-ones) and patch the remaining halfwords with MOVK.
            for (int i = 0; i < numberHalfWords; i++) {
                if (halfword[i] != 0xffff) {
                    if (needToClearRegister) {
                        m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
                        needToClearRegister = false;
                    } else
                        m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
                }
            }
        }
    }
2577
    // Load using the scaled, unsigned-immediate (LDR) addressing form.
    // Specialized below for 8- and 16-bit accesses.
    template<int datasize>
    ALWAYS_INLINE void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        m_assembler.ldr<datasize>(rt, rn, pimm);
    }
2583
    // Load using the unscaled, signed-immediate (LDUR) addressing form.
    // Specialized below for 8- and 16-bit accesses.
    template<int datasize>
    ALWAYS_INLINE void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
    {
        m_assembler.ldur<datasize>(rt, rn, simm);
    }
2589
    // Store using the scaled, unsigned-immediate (STR) addressing form.
    // Specialized below for 8- and 16-bit accesses.
    template<int datasize>
    ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
    {
        m_assembler.str<datasize>(rt, rn, pimm);
    }
2595
    // Store using the unscaled, signed-immediate (STUR) addressing form.
    // Specialized below for 8- and 16-bit accesses.
    template<int datasize>
    ALWAYS_INLINE void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
    {
        m_assembler.stur<datasize>(rt, rn, simm);
    }
2601
    // Emits a fixed two-instruction sequence (MOVZ + MOVK) regardless of the
    // immediate's value, so the code can later be repatched in place.
    void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
    {
        int32_t value = imm.m_value;
        m_assembler.movz<32>(dest, getHalfword(value, 0));
        m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
    }
2608
    // Fixed three-instruction pointer materialization (MOVZ + MOVK + MOVK)
    // for repatchable code. Only the low three halfwords are emitted —
    // assumes pointers fit in 48 bits of virtual address space; TODO confirm
    // against the platform's VA layout.
    void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
    {
        intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
        m_assembler.movz<64>(dest, getHalfword(value, 0));
        m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
        m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
    }
2616
    // Fixed-width materialization of a 32-bit value: both arms emit exactly
    // two instructions, so repatching code can assume a constant size.
    // NOTE(review): the 32-bit MOVZ/MOVN/MOVK forms zero the upper 32 bits of
    // dest, so despite the name the high half is not sign-filled here —
    // presumably callers only consume the low 32 bits; confirm at call sites.
    void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
    {
        if (value >= 0) {
            m_assembler.movz<32>(dest, getHalfword(value, 0));
            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
        } else {
            // Negative: MOVN sets the untouched halfword to 0xffff, then MOVK
            // patches in the upper halfword of the value.
            m_assembler.movn<32>(dest, ~getHalfword(value, 0));
            m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
        }
    }
2627
2628    void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
2629    {
2630        move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
2631    }
2632
2633    template<int datasize>
2634    ALWAYS_INLINE void load(const void* address, RegisterID dest)
2635    {
2636        intptr_t currentRegisterContents;
2637        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
2638            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
2639            intptr_t addressDelta = addressAsInt - currentRegisterContents;
2640
2641            if (dest == memoryTempRegister)
2642                m_cachedMemoryTempRegister.invalidate();
2643
2644            if (isInIntRange(addressDelta)) {
2645                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
2646                    m_assembler.ldur<datasize>(dest,  memoryTempRegister, addressDelta);
2647                    return;
2648                }
2649
2650                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
2651                    m_assembler.ldr<datasize>(dest,  memoryTempRegister, addressDelta);
2652                    return;
2653                }
2654            }
2655
2656            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
2657                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
2658                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2659                m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2660                return;
2661            }
2662        }
2663
2664        move(TrustedImmPtr(address), memoryTempRegister);
2665        if (dest == memoryTempRegister)
2666            m_cachedMemoryTempRegister.invalidate();
2667        else
2668            m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
2669        m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
2670    }
2671
    // Stores src to an absolute address, reusing memoryTempRegister's cached
    // contents when the target address is reachable from the cached address
    // via an addressing-mode offset, or shares its upper 48 bits. Unlike
    // load(), the store never clobbers memoryTempRegister, so the cache stays
    // valid in every path.
    template<int datasize>
    ALWAYS_INLINE void store(RegisterID src, const void* address)
    {
        intptr_t currentRegisterContents;
        if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
            intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
            intptr_t addressDelta = addressAsInt - currentRegisterContents;

            if (isInIntRange(addressDelta)) {
                if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
                    m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
                    return;
                }

                if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
                    m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
                    return;
                }
            }

            // Same upper 48 bits: patch only the low halfword with MOVK.
            if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
                m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
                m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
                m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
                return;
            }
        }

        move(TrustedImmPtr(address), memoryTempRegister);
        m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
        m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
    }
2704
    // Attempts to materialize `immediate` into the cached temp register using
    // knowledge of its current contents. Returns true on success (and updates
    // the cache); returns false when the caller must emit a full move.
    template <int dataSize>
    ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
    {
        intptr_t currentRegisterContents;
        if (dest.value(currentRegisterContents)) {
            // Already holds the value: nothing to emit.
            if (currentRegisterContents == immediate)
                return true;

            LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));

            // Encodable as a single logical-immediate move.
            if (logicalImm.isValid()) {
                m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
                dest.setValue(immediate);
                return true;
            }

            // Upper bits match: patch only the differing low halfwords with MOVK.
            if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
                if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);

                if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
                    m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);

                dest.setValue(immediate);
                return true;
            }
        }

        return false;
    }
2735
2736    void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
2737    {
2738        if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
2739            return;
2740
2741        moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
2742        dest.setValue(imm.m_value);
2743    }
2744
2745    void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
2746    {
2747        if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
2748            return;
2749
2750        moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
2751        dest.setValue(imm.asIntptr());
2752    }
2753
2754    void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
2755    {
2756        if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
2757            return;
2758
2759        moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
2760        dest.setValue(imm.m_value);
2761    }
2762
2763    template<int datasize>
2764    ALWAYS_INLINE bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2765    {
2766        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2767            loadUnscaledImmediate<datasize>(rt, rn, offset);
2768            return true;
2769        }
2770        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2771            loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2772            return true;
2773        }
2774        return false;
2775    }
2776
2777    template<int datasize>
2778    ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2779    {
2780        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2781            m_assembler.ldur<datasize>(rt, rn, offset);
2782            return true;
2783        }
2784        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2785            m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
2786            return true;
2787        }
2788        return false;
2789    }
2790
2791    template<int datasize>
2792    ALWAYS_INLINE bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
2793    {
2794        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2795            storeUnscaledImmediate<datasize>(rt, rn, offset);
2796            return true;
2797        }
2798        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2799            storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
2800            return true;
2801        }
2802        return false;
2803    }
2804
2805    template<int datasize>
2806    ALWAYS_INLINE bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
2807    {
2808        if (ARM64Assembler::canEncodeSImmOffset(offset)) {
2809            m_assembler.stur<datasize>(rt, rn, offset);
2810            return true;
2811        }
2812        if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
2813            m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
2814            return true;
2815        }
2816        return false;
2817    }
2818
2819    friend class LinkBuffer;
2820    friend class RepatchBuffer;
2821
2822    static void linkCall(void* code, Call call, FunctionPtr function)
2823    {
2824        if (call.isFlagSet(Call::Near))
2825            ARM64Assembler::linkCall(code, call.m_label, function.value());
2826        else
2827            ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
2828    }
2829
    // Redirects an already-linked call by rewriting the pointer slot that
    // precedes the call instruction.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }
2834
    // Overload taking a raw function pointer; same pointer-slot rewrite as above.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
    }
2839
    CachedTempRegister m_dataMemoryTempRegister; // Tracks the last value materialized into dataTempRegister.
    CachedTempRegister m_cachedMemoryTempRegister; // Tracks the last address materialized into memoryTempRegister.
    bool m_makeJumpPatchable; // When set, jumps are emitted in their fixed-size, repatchable forms.
2843};
2844
2845// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
// 8-bit load, scaled unsigned-offset form: LDRB.
template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrb(rt, rn, pimm);
}
2851
// 16-bit load, scaled unsigned-offset form: LDRH.
template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrh(rt, rn, pimm);
}
2857
// 8-bit load, unscaled signed-offset form: LDURB.
template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurb(rt, rn, simm);
}
2863
// 16-bit load, unscaled signed-offset form: LDURH.
template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurh(rt, rn, simm);
}
2869
// 8-bit store, scaled unsigned-offset form: STRB.
template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strb(rt, rn, pimm);
}
2875
// 16-bit store, scaled unsigned-offset form: STRH.
template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strh(rt, rn, pimm);
}
2881
// 8-bit store, unscaled signed-offset form: STURB.
template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturb(rt, rn, simm);
}
2887
// 16-bit store, unscaled signed-offset form: STURH.
template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturh(rt, rn, simm);
}
2893
2894} // namespace JSC
2895
2896#endif // ENABLE(ASSEMBLER)
2897
2898#endif // MacroAssemblerARM64_h
2899