1/*
2 * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#ifndef ARMAssembler_h
28#define ARMAssembler_h
29
30#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
31
32#include "AssemblerBuffer.h"
33#include <limits.h>
34#include <wtf/Assertions.h>
35#include <wtf/Vector.h>
36#include <stdint.h>
37
38namespace JSC {
39
40namespace ARMRegisters {
41    typedef enum {
42        r0,
43        r1,
44        r2,
45        r3,
46        r4,
47        r5,
48        r6,
49        r7, fp = r7,   // frame pointer
50        r8,
51        r9, sb = r9,   // static base
52        r10, sl = r10, // stack limit
53        r11,
54        r12, ip = r12,
55        r13, sp = r13,
56        r14, lr = r14,
57        r15, pc = r15,
58    } RegisterID;
59
60    typedef enum {
61        s0,
62        s1,
63        s2,
64        s3,
65        s4,
66        s5,
67        s6,
68        s7,
69        s8,
70        s9,
71        s10,
72        s11,
73        s12,
74        s13,
75        s14,
76        s15,
77        s16,
78        s17,
79        s18,
80        s19,
81        s20,
82        s21,
83        s22,
84        s23,
85        s24,
86        s25,
87        s26,
88        s27,
89        s28,
90        s29,
91        s30,
92        s31,
93    } FPSingleRegisterID;
94
95    typedef enum {
96        d0,
97        d1,
98        d2,
99        d3,
100        d4,
101        d5,
102        d6,
103        d7,
104        d8,
105        d9,
106        d10,
107        d11,
108        d12,
109        d13,
110        d14,
111        d15,
112        d16,
113        d17,
114        d18,
115        d19,
116        d20,
117        d21,
118        d22,
119        d23,
120        d24,
121        d25,
122        d26,
123        d27,
124        d28,
125        d29,
126        d30,
127        d31,
128    } FPDoubleRegisterID;
129
130    typedef enum {
131        q0,
132        q1,
133        q2,
134        q3,
135        q4,
136        q5,
137        q6,
138        q7,
139        q8,
140        q9,
141        q10,
142        q11,
143        q12,
144        q13,
145        q14,
146        q15,
147        q16,
148        q17,
149        q18,
150        q19,
151        q20,
152        q21,
153        q22,
154        q23,
155        q24,
156        q25,
157        q26,
158        q27,
159        q28,
160        q29,
161        q30,
162        q31,
163    } FPQuadRegisterID;
164
165    inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
166    {
167        ASSERT(reg < d16);
168        return (FPSingleRegisterID)(reg << 1);
169    }
170
171    inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
172    {
173        ASSERT(!(reg & 1));
174        return (FPDoubleRegisterID)(reg >> 1);
175    }
176
177#if USE(MASM_PROBE)
178    #define FOR_EACH_CPU_REGISTER(V) \
179        FOR_EACH_CPU_GPREGISTER(V) \
180        FOR_EACH_CPU_SPECIAL_REGISTER(V) \
181        FOR_EACH_CPU_FPREGISTER(V)
182
183    #define FOR_EACH_CPU_GPREGISTER(V) \
184        V(void*, r0) \
185        V(void*, r1) \
186        V(void*, r2) \
187        V(void*, r3) \
188        V(void*, r4) \
189        V(void*, r5) \
190        V(void*, r6) \
191        V(void*, r7) \
192        V(void*, r8) \
193        V(void*, r9) \
194        V(void*, r10) \
195        V(void*, r11) \
196        V(void*, ip) \
197        V(void*, sp) \
198        V(void*, lr) \
199        V(void*, pc)
200
201    #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
202        V(void*, apsr) \
203        V(void*, fpscr) \
204
205    #define FOR_EACH_CPU_FPREGISTER(V) \
206        V(double, d0) \
207        V(double, d1) \
208        V(double, d2) \
209        V(double, d3) \
210        V(double, d4) \
211        V(double, d5) \
212        V(double, d6) \
213        V(double, d7) \
214        V(double, d8) \
215        V(double, d9) \
216        V(double, d10) \
217        V(double, d11) \
218        V(double, d12) \
219        V(double, d13) \
220        V(double, d14) \
221        V(double, d15) \
222        FOR_EACH_CPU_FPREGISTER_EXTENSION(V)
223
224#if CPU(APPLE_ARMV7S)
225    #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) \
226        V(double, d16) \
227        V(double, d17) \
228        V(double, d18) \
229        V(double, d19) \
230        V(double, d20) \
231        V(double, d21) \
232        V(double, d22) \
233        V(double, d23) \
234        V(double, d24) \
235        V(double, d25) \
236        V(double, d26) \
237        V(double, d27) \
238        V(double, d28) \
239        V(double, d29) \
240        V(double, d30) \
241        V(double, d31)
242#else
243    #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) // Nothing to add.
244#endif // CPU(APPLE_ARMV7S)
245
246#endif // USE(MASM_PROBE)
247}
248
249class ARMv7Assembler;
250class ARMThumbImmediate {
251    friend class ARMv7Assembler;
252
253    typedef uint8_t ThumbImmediateType;
254    static const ThumbImmediateType TypeInvalid = 0;
255    static const ThumbImmediateType TypeEncoded = 1;
256    static const ThumbImmediateType TypeUInt16 = 2;
257
258    typedef union {
259        int16_t asInt;
260        struct {
261            unsigned imm8 : 8;
262            unsigned imm3 : 3;
263            unsigned i    : 1;
264            unsigned imm4 : 4;
265        };
266        // If this is an encoded immediate, then it may describe a shift, or a pattern.
267        struct {
268            unsigned shiftValue7 : 7;
269            unsigned shiftAmount : 5;
270        };
271        struct {
272            unsigned immediate   : 8;
273            unsigned pattern     : 4;
274        };
275    } ThumbImmediateValue;
276
277    // byte0 contains least significant bit; not using an array to make client code endian agnostic.
278    typedef union {
279        int32_t asInt;
280        struct {
281            uint8_t byte0;
282            uint8_t byte1;
283            uint8_t byte2;
284            uint8_t byte3;
285        };
286    } PatternBytes;
287
288    ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
289    {
290        if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
291            value >>= N;             /* if any were set, lose the bottom N */
292        else                         /* if none of the top N bits are set, */
293            zeros += N;              /* then we have identified N leading zeros */
294    }
295
296    static int32_t countLeadingZeros(uint32_t value)
297    {
298        if (!value)
299            return 32;
300
301        int32_t zeros = 0;
302        countLeadingZerosPartial(value, zeros, 16);
303        countLeadingZerosPartial(value, zeros, 8);
304        countLeadingZerosPartial(value, zeros, 4);
305        countLeadingZerosPartial(value, zeros, 2);
306        countLeadingZerosPartial(value, zeros, 1);
307        return zeros;
308    }
309
310    ARMThumbImmediate()
311        : m_type(TypeInvalid)
312    {
313        m_value.asInt = 0;
314    }
315
316    ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
317        : m_type(type)
318        , m_value(value)
319    {
320    }
321
322    ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
323        : m_type(TypeUInt16)
324    {
325        // Make sure this constructor is only reached with type TypeUInt16;
326        // this extra parameter makes the code a little clearer by making it
327        // explicit at call sites which type is being constructed
328        ASSERT_UNUSED(type, type == TypeUInt16);
329
330        m_value.asInt = value;
331    }
332
333public:
334    static ARMThumbImmediate makeEncodedImm(uint32_t value)
335    {
336        ThumbImmediateValue encoding;
337        encoding.asInt = 0;
338
339        // okay, these are easy.
340        if (value < 256) {
341            encoding.immediate = value;
342            encoding.pattern = 0;
343            return ARMThumbImmediate(TypeEncoded, encoding);
344        }
345
346        int32_t leadingZeros = countLeadingZeros(value);
347        // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
348        ASSERT(leadingZeros < 24);
349
350        // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
351        // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
352        // zero.  count(B) == 8, so the count of bits to be checked is 24 - count(Z).
353        int32_t rightShiftAmount = 24 - leadingZeros;
354        if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
355            // Shift the value down to the low byte position.  The assign to
356            // shiftValue7 drops the implicit top bit.
357            encoding.shiftValue7 = value >> rightShiftAmount;
358            // The endoded shift amount is the magnitude of a right rotate.
359            encoding.shiftAmount = 8 + leadingZeros;
360            return ARMThumbImmediate(TypeEncoded, encoding);
361        }
362
363        PatternBytes bytes;
364        bytes.asInt = value;
365
366        if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
367            encoding.immediate = bytes.byte0;
368            encoding.pattern = 3;
369            return ARMThumbImmediate(TypeEncoded, encoding);
370        }
371
372        if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
373            encoding.immediate = bytes.byte0;
374            encoding.pattern = 1;
375            return ARMThumbImmediate(TypeEncoded, encoding);
376        }
377
378        if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
379            encoding.immediate = bytes.byte1;
380            encoding.pattern = 2;
381            return ARMThumbImmediate(TypeEncoded, encoding);
382        }
383
384        return ARMThumbImmediate();
385    }
386
387    static ARMThumbImmediate makeUInt12(int32_t value)
388    {
389        return (!(value & 0xfffff000))
390            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
391            : ARMThumbImmediate();
392    }
393
394    static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
395    {
396        // If this is not a 12-bit unsigned it, try making an encoded immediate.
397        return (!(value & 0xfffff000))
398            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
399            : makeEncodedImm(value);
400    }
401
402    // The 'make' methods, above, return a !isValid() value if the argument
403    // cannot be represented as the requested type.  This methods  is called
404    // 'get' since the argument can always be represented.
405    static ARMThumbImmediate makeUInt16(uint16_t value)
406    {
407        return ARMThumbImmediate(TypeUInt16, value);
408    }
409
410    bool isValid()
411    {
412        return m_type != TypeInvalid;
413    }
414
415    uint16_t asUInt16() const { return m_value.asInt; }
416
417    // These methods rely on the format of encoded byte values.
418    bool isUInt3() { return !(m_value.asInt & 0xfff8); }
419    bool isUInt4() { return !(m_value.asInt & 0xfff0); }
420    bool isUInt5() { return !(m_value.asInt & 0xffe0); }
421    bool isUInt6() { return !(m_value.asInt & 0xffc0); }
422    bool isUInt7() { return !(m_value.asInt & 0xff80); }
423    bool isUInt8() { return !(m_value.asInt & 0xff00); }
424    bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
425    bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
426    bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
427    bool isUInt16() { return m_type == TypeUInt16; }
428    uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
429    uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
430    uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
431    uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
432    uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
433    uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
434    uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
435    uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
436    uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
437    uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
438
439    bool isEncodedImm() { return m_type == TypeEncoded; }
440
441private:
442    ThumbImmediateType m_type;
443    ThumbImmediateValue m_value;
444};
445
// Shift operand types, as encoded in the instruction's shift-type field.
typedef enum {
    SRType_LSL, // logical shift left
    SRType_LSR, // logical shift right
    SRType_ASR, // arithmetic shift right
    SRType_ROR, // rotate right

    SRType_RRX = SRType_ROR // rotate-right-with-extend aliases ROR's value
} ARMShiftType;
454
455class ShiftTypeAndAmount {
456    friend class ARMv7Assembler;
457
458public:
459    ShiftTypeAndAmount()
460    {
461        m_u.type = (ARMShiftType)0;
462        m_u.amount = 0;
463    }
464
465    ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
466    {
467        m_u.type = type;
468        m_u.amount = amount & 31;
469    }
470
471    unsigned lo4() { return m_u.lo4; }
472    unsigned hi4() { return m_u.hi4; }
473
474private:
475    union {
476        struct {
477            unsigned lo4 : 4;
478            unsigned hi4 : 4;
479        };
480        struct {
481            unsigned type   : 2;
482            unsigned amount : 6;
483        };
484    } m_u;
485};
486
487class ARMv7Assembler {
488public:
489    typedef ARMRegisters::RegisterID RegisterID;
490    typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
491    typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
492    typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
493    typedef FPDoubleRegisterID FPRegisterID;
494
    // Inclusive bounds of the core register range exposed to clients (r0-r13).
    static RegisterID firstRegister() { return ARMRegisters::r0; }
    static RegisterID lastRegister() { return ARMRegisters::r13; }

    // Inclusive bounds of the double-precision FP register range (d0-d31).
    static FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
    static FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
500
    // Condition codes; the x86 flag-name equivalents are:
    // (HS, LO, HI, LS) -> (AE, B, A, BE)
    // (VS, VC) -> (O, NO)
    typedef enum {
        ConditionEQ, // Zero / Equal.
        ConditionNE, // Non-zero / Not equal.
        ConditionHS, ConditionCS = ConditionHS, // Unsigned higher or same.
        ConditionLO, ConditionCC = ConditionLO, // Unsigned lower.
        ConditionMI, // Negative.
        ConditionPL, // Positive or zero.
        ConditionVS, // Overflowed.
        ConditionVC, // Not overflowed.
        ConditionHI, // Unsigned higher.
        ConditionLS, // Unsigned lower or same.
        ConditionGE, // Signed greater than or equal.
        ConditionLT, // Signed less than.
        ConditionGT, // Signed greater than.
        ConditionLE, // Signed less than or equal.
        ConditionAL, // Unconditional / Always execute.
        ConditionInvalid
    } Condition;
521
// Each jump enum value packs two facts: the low 3 bits are a unique index,
// the remaining bits hold the size in bytes of the emitted jump sequence.
#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
    // Kind of jump requested; the *FixedSize variants reserve the maximum
    // sequence length (5 or 6 halfwords) rather than being shortened later.
    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
                    JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
                    JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
                    JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
                    JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
    };
    // Concrete encoding chosen when a jump is linked, with its byte size.
    enum JumpLinkType {
        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
        LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
        LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
        LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
        LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
        LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
        LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
        LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
    };
540
    // Records a jump awaiting linking: its emission offset (m_from), its
    // target (m_to), the requested jump kind, and - once decided - the
    // concrete encoding used to link it (m_linkType, initially LinkInvalid).
    class LinkRecord {
    public:
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
        }
        // Copies via the three raw 32-bit words instead of the bitfields;
        // the COMPILE_ASSERT below guarantees both union views are the
        // same size, so this is a faster whole-struct copy.
        void operator=(const LinkRecord& other)
        {
            data.copyTypes.content[0] = other.data.copyTypes.content[0];
            data.copyTypes.content[1] = other.data.copyTypes.content[1];
            data.copyTypes.content[2] = other.data.copyTypes.content[2];
        }
        intptr_t from() const { return data.realTypes.m_from; }
        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
        intptr_t to() const { return data.realTypes.m_to; }
        JumpType type() const { return data.realTypes.m_type; }
        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
        Condition condition() const { return data.realTypes.m_condition; }
    private:
        union {
            // Packed into 12 bytes: from/to are truncated to 31 bits each.
            struct RealTypes {
                intptr_t m_from : 31;
                intptr_t m_to : 31;
                JumpType m_type : 8;
                JumpLinkType m_linkType : 8;
                Condition m_condition : 16;
            } realTypes;
            struct CopyTypes {
                uint32_t content[3];
            } copyTypes;
            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
        } data;
    };
579
    // INT_MIN serves as the initial sentinel for the watchpoint indices
    // (members declared elsewhere in this class — presumably "no
    // watchpoint recorded yet"; confirm against their uses).
    ARMv7Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
    }

    // Direct access to the underlying instruction buffer.
    AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
587
588private:
589
590    // ARMv7, Appx-A.6.3
591    static bool BadReg(RegisterID reg)
592    {
593        return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
594    }
595
596    uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
597    {
598        uint32_t rdMask = (rdNum >> 1) << highBitsShift;
599        if (rdNum & 1)
600            rdMask |= 1 << lowBitShift;
601        return rdMask;
602    }
603
604    uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
605    {
606        uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
607        if (rdNum & 16)
608            rdMask |= 1 << highBitShift;
609        return rdMask;
610    }
611
    // Opcodes for single-halfword (16-bit) Thumb instructions. Several
    // entries share a value (e.g. OP_BLX/OP_BX, OP_IT/OP_NOP_T1); the
    // operand bits filled in by the formatter distinguish them.
    typedef enum {
        OP_ADD_reg_T1       = 0x1800,
        OP_SUB_reg_T1       = 0x1A00,
        OP_ADD_imm_T1       = 0x1C00,
        OP_SUB_imm_T1       = 0x1E00,
        OP_MOV_imm_T1       = 0x2000,
        OP_CMP_imm_T1       = 0x2800,
        OP_ADD_imm_T2       = 0x3000,
        OP_SUB_imm_T2       = 0x3800,
        OP_AND_reg_T1       = 0x4000,
        OP_EOR_reg_T1       = 0x4040,
        OP_TST_reg_T1       = 0x4200,
        OP_RSB_imm_T1       = 0x4240,
        OP_CMP_reg_T1       = 0x4280,
        OP_ORR_reg_T1       = 0x4300,
        OP_MVN_reg_T1       = 0x43C0,
        OP_ADD_reg_T2       = 0x4400,
        OP_MOV_reg_T1       = 0x4600,
        OP_BLX              = 0x4700,
        OP_BX               = 0x4700,
        OP_STR_reg_T1       = 0x5000,
        OP_STRH_reg_T1      = 0x5200,
        OP_STRB_reg_T1      = 0x5400,
        OP_LDRSB_reg_T1     = 0x5600,
        OP_LDR_reg_T1       = 0x5800,
        OP_LDRH_reg_T1      = 0x5A00,
        OP_LDRB_reg_T1      = 0x5C00,
        OP_LDRSH_reg_T1     = 0x5E00,
        OP_STR_imm_T1       = 0x6000,
        OP_LDR_imm_T1       = 0x6800,
        OP_STRB_imm_T1      = 0x7000,
        OP_LDRB_imm_T1      = 0x7800,
        OP_STRH_imm_T1      = 0x8000,
        OP_LDRH_imm_T1      = 0x8800,
        OP_STR_imm_T2       = 0x9000,
        OP_LDR_imm_T2       = 0x9800,
        OP_ADD_SP_imm_T1    = 0xA800,
        OP_ADD_SP_imm_T2    = 0xB000,
        OP_SUB_SP_imm_T1    = 0xB080,
        OP_PUSH_T1          = 0xB400,
        OP_POP_T1           = 0xBC00,
        OP_BKPT             = 0xBE00,
        OP_IT               = 0xBF00,
        OP_NOP_T1           = 0xBF00,
    } OpcodeID;
657
    // Leading-halfword opcodes. Entries with an 'a' suffix are the first
    // halfword of a two-halfword encoding whose second halfword is the
    // matching 'b' entry in OpcodeID2; again, duplicate values are
    // disambiguated by operand fields (often in the second halfword).
    typedef enum {
        OP_B_T1         = 0xD000,
        OP_B_T2         = 0xE000,
        OP_POP_T2       = 0xE8BD,
        OP_PUSH_T2      = 0xE92D,
        OP_AND_reg_T2   = 0xEA00,
        OP_TST_reg_T2   = 0xEA10,
        OP_ORR_reg_T2   = 0xEA40,
        OP_ORR_S_reg_T2 = 0xEA50,
        OP_ASR_imm_T1   = 0xEA4F,
        OP_LSL_imm_T1   = 0xEA4F,
        OP_LSR_imm_T1   = 0xEA4F,
        OP_ROR_imm_T1   = 0xEA4F,
        OP_MVN_reg_T2   = 0xEA6F,
        OP_EOR_reg_T2   = 0xEA80,
        OP_ADD_reg_T3   = 0xEB00,
        OP_ADD_S_reg_T3 = 0xEB10,
        OP_SUB_reg_T2   = 0xEBA0,
        OP_SUB_S_reg_T2 = 0xEBB0,
        OP_CMP_reg_T2   = 0xEBB0,
        OP_VMOV_CtoD    = 0xEC00,
        OP_VMOV_DtoC    = 0xEC10,
        OP_FSTS         = 0xED00,
        OP_VSTR         = 0xED00,
        OP_FLDS         = 0xED10,
        OP_VLDR         = 0xED10,
        OP_VMOV_CtoS    = 0xEE00,
        OP_VMOV_StoC    = 0xEE10,
        OP_VMUL_T2      = 0xEE20,
        OP_VADD_T2      = 0xEE30,
        OP_VSUB_T2      = 0xEE30,
        OP_VDIV         = 0xEE80,
        OP_VABS_T2      = 0xEEB0,
        OP_VCMP         = 0xEEB0,
        OP_VCVT_FPIVFP  = 0xEEB0,
        OP_VMOV_T2      = 0xEEB0,
        OP_VMOV_IMM_T2  = 0xEEB0,
        OP_VMRS         = 0xEEB0,
        OP_VNEG_T2      = 0xEEB0,
        OP_VSQRT_T1     = 0xEEB0,
        OP_VCVTSD_T1    = 0xEEB0,
        OP_VCVTDS_T1    = 0xEEB0,
        OP_B_T3a        = 0xF000,
        OP_B_T4a        = 0xF000,
        OP_AND_imm_T1   = 0xF000,
        OP_TST_imm      = 0xF010,
        OP_ORR_imm_T1   = 0xF040,
        OP_MOV_imm_T2   = 0xF040,
        OP_MVN_imm      = 0xF060,
        OP_EOR_imm_T1   = 0xF080,
        OP_ADD_imm_T3   = 0xF100,
        OP_ADD_S_imm_T3 = 0xF110,
        OP_CMN_imm      = 0xF110,
        OP_ADC_imm      = 0xF140,
        OP_SUB_imm_T3   = 0xF1A0,
        OP_SUB_S_imm_T3 = 0xF1B0,
        OP_CMP_imm_T2   = 0xF1B0,
        OP_RSB_imm_T2   = 0xF1C0,
        OP_RSB_S_imm_T2 = 0xF1D0,
        OP_ADD_imm_T4   = 0xF200,
        OP_MOV_imm_T3   = 0xF240,
        OP_SUB_imm_T4   = 0xF2A0,
        OP_MOVT         = 0xF2C0,
        OP_UBFX_T1      = 0xF3C0,
        OP_NOP_T2a      = 0xF3AF,
        OP_DMB_SY_T2a   = 0xF3BF,
        OP_STRB_imm_T3  = 0xF800,
        OP_STRB_reg_T2  = 0xF800,
        OP_LDRB_imm_T3  = 0xF810,
        OP_LDRB_reg_T2  = 0xF810,
        OP_STRH_imm_T3  = 0xF820,
        OP_STRH_reg_T2  = 0xF820,
        OP_LDRH_reg_T2  = 0xF830,
        OP_LDRH_imm_T3  = 0xF830,
        OP_STR_imm_T4   = 0xF840,
        OP_STR_reg_T2   = 0xF840,
        OP_LDR_imm_T4   = 0xF850,
        OP_LDR_reg_T2   = 0xF850,
        OP_STRB_imm_T2  = 0xF880,
        OP_LDRB_imm_T2  = 0xF890,
        OP_STRH_imm_T2  = 0xF8A0,
        OP_LDRH_imm_T2  = 0xF8B0,
        OP_STR_imm_T3   = 0xF8C0,
        OP_LDR_imm_T3   = 0xF8D0,
        OP_LDRSB_reg_T2 = 0xF910,
        OP_LDRSH_reg_T2 = 0xF930,
        OP_LSL_reg_T2   = 0xFA00,
        OP_LSR_reg_T2   = 0xFA20,
        OP_ASR_reg_T2   = 0xFA40,
        OP_ROR_reg_T2   = 0xFA60,
        OP_CLZ          = 0xFAB0,
        OP_SMULL_T1     = 0xFB80,
#if CPU(APPLE_ARMV7S)
        OP_SDIV_T1      = 0xFB90,
        OP_UDIV_T1      = 0xFBB0,
#endif
    } OpcodeID1;
755
    // Second halfwords for two-halfword encodings; each 'b' entry pairs
    // with the same-named value (or 'a' entry) in OpcodeID1.
    typedef enum {
        OP_VADD_T2b     = 0x0A00,
        OP_VDIVb        = 0x0A00,
        OP_FLDSb        = 0x0A00,
        OP_VLDRb        = 0x0A00,
        OP_VMOV_IMM_T2b = 0x0A00,
        OP_VMOV_T2b     = 0x0A40,
        OP_VMUL_T2b     = 0x0A00,
        OP_FSTSb        = 0x0A00,
        OP_VSTRb        = 0x0A00,
        OP_VMOV_StoCb   = 0x0A10,
        OP_VMOV_CtoSb   = 0x0A10,
        OP_VMOV_DtoCb   = 0x0A10,
        OP_VMOV_CtoDb   = 0x0A10,
        OP_VMRSb        = 0x0A10,
        OP_VABS_T2b     = 0x0A40,
        OP_VCMPb        = 0x0A40,
        OP_VCVT_FPIVFPb = 0x0A40,
        OP_VNEG_T2b     = 0x0A40,
        OP_VSUB_T2b     = 0x0A40,
        OP_VSQRT_T1b    = 0x0A40,
        OP_VCVTSD_T1b   = 0x0A40,
        OP_VCVTDS_T1b   = 0x0A40,
        OP_NOP_T2b      = 0x8000,
        OP_DMB_SY_T2b   = 0x8F5F,
        OP_B_T3b        = 0x8000,
        OP_B_T4b        = 0x9000,
    } OpcodeID2;
784
    // Packs four 4-bit fields into a single value; f0 is the least
    // significant nibble. Note the constructor takes them high-to-low
    // (f3 first) to match instruction-field reading order.
    struct FourFours {
        FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
        {
            m_u.f0 = f0;
            m_u.f1 = f1;
            m_u.f2 = f2;
            m_u.f3 = f3;
        }

        union {
            unsigned value;
            struct {
                unsigned f0 : 4;
                unsigned f1 : 4;
                unsigned f2 : 4;
                unsigned f3 : 4;
            };
        } m_u;
    };
804
805    class ARMInstructionFormatter;
806
807    // false means else!
808    static bool ifThenElseConditionBit(Condition condition, bool isIf)
809    {
810        return isIf ? (condition & 1) : !(condition & 1);
811    }
    // Builds the operand byte of an IT instruction covering four following
    // instructions: condition in the high nibble, one mask bit per
    // subsequent instruction, then a terminating 1 bit.
    static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
    {
        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
            | (ifThenElseConditionBit(condition, inst3if) << 2)
            | (ifThenElseConditionBit(condition, inst4if) << 1)
            | 1;
        // AL has no meaningful inverse, so every slot must be "if"
        // (mask may contain only the terminator bit).
        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
        return (condition << 4) | mask;
    }
    // As above, for an IT block covering three instructions.
    static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
    {
        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
            | (ifThenElseConditionBit(condition, inst3if) << 2)
            | 2;
        // AL has no meaningful inverse; mask may contain only the terminator bit.
        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
        return (condition << 4) | mask;
    }
    // As above, for an IT block covering two instructions.
    static uint8_t ifThenElse(Condition condition, bool inst2if)
    {
        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
            | 4;
        // AL has no meaningful inverse; mask may contain only the terminator bit.
        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
        return (condition << 4) | mask;
    }
836
    // As above, for an IT block covering a single instruction
    // (mask is just the terminator bit).
    static uint8_t ifThenElse(Condition condition)
    {
        int mask = 8;
        return (condition << 4) | mask;
    }
842
843public:
844
    // ADC (add with carry) immediate, emitted as a two-halfword encoding.
    void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
    }
855
    // ADD immediate. Selects the shortest available encoding: the 16-bit
    // SP-relative forms, then the 16-bit low-register forms, then the
    // 32-bit T3 (encoded immediate) / T4 (plain 12-bit immediate) forms.
    void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if (rn == ARMRegisters::sp && imm.isUInt16()) {
            ASSERT(!(imm.getUInt16() & 3)); // SP-relative offsets are word-aligned.
            if (!(rd & 8) && imm.isUInt10()) {
                // 16-bit ADD <Rd>, SP, #imm — immediate stored in words.
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
                return;
            } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
                // 16-bit ADD SP, SP, #imm — immediate stored in words.
                m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
                return;
            }
        } else if (!((rd | rn) & 8)) {
            // Both registers are low (r0-r7): 16-bit forms are available.
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        // Fall back to the 32-bit encodings.
        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
        }
    }
890
    // ADD (register, optionally shifted), 32-bit encoding.
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
899
    // NOTE: In an IT block, add doesn't modify the flags register.
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        // When the destination is SP, first move rn into SP so that the
        // rd == rn case below applies.
        if (rd == ARMRegisters::sp) {
            mov(rd, rn);
            rn = rd;
        }

        // Prefer the 16-bit forms where operands permit; otherwise fall
        // back to the 32-bit shifted-register form with a zero shift.
        if (rd == rn)
            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
        else if (rd == rm)
            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
        else if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
        else
            add(rd, rn, rm, ShiftTypeAndAmount());
    }
917
    // Not allowed in an IT (if then) block.
    // ADD immediate, flag-setting variant.
    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        // Low registers: the 16-bit forms below set flags outside an IT block.
        if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
    }
939
    // Not allowed in an IT (if then) block?
    // ADD (register, shifted), flag-setting variant: rd = rn + (rm shifted), updating APSR.
    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // Not allowed in an IT (if then) block.
    // ADD (register), flag-setting variant: 16-bit T1 encoding when all registers are low.
    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
        else
            add_S(rd, rn, rm, ShiftTypeAndAmount());
    }
958
    // AND (immediate): rd = rn & imm. Named ARM_and because 'and' is a C++ keyword.
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
    }

    // AND (register, shifted): rd = rn & (rm shifted). 32-bit T2 encoding.
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // AND (register): uses the 16-bit T1 encoding when rd aliases one operand
    // and all registers are low; AND is commutative so the operands may swap.
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
        else
            ARM_and(rd, rn, rm, ShiftTypeAndAmount());
    }
984
    // ASR (immediate): rd = rm >> shiftAmount (arithmetic).
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // ASR (register): rd = rn >> rm (arithmetic, shift amount from a register).
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1000
    // Only allowed in IT (if then) block if last instruction.
    // Unconditional branch (32-bit T4 encoding); the target is patched in later
    // via the returned label.
    ALWAYS_INLINE AssemblerLabel b()
    {
        m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
        return m_formatter.label();
    }

    // Only allowed in IT (if then) block if last instruction.
    // Branch with link and exchange: call through register rm.
    ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
    {
        ASSERT(rm != ARMRegisters::pc);
        // The (RegisterID)8 supplies fixed bits of the BLX encoding, not a register.
        m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
        return m_formatter.label();
    }

    // Only allowed in IT (if then) block if last instruction.
    // Branch and exchange: jump through register rm.
    ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
    {
        m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
        return m_formatter.label();
    }

    // Software breakpoint with an 8-bit immediate payload.
    void bkpt(uint8_t imm = 0)
    {
        m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
    }
1027
    // CLZ: rd = count of leading zeros in rm. Note rm is encoded twice per the spec.
    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
    }

    // CMN (immediate): set flags on rn + imm. The 0xf in the Rd field marks "no destination".
    ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
    }
1042
    // CMP (immediate): set flags on rn - imm, preferring the 16-bit T1 encoding.
    ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        if (!(rn & 8) && imm.isUInt8())
            m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
    }

    // CMP (register, shifted): set flags on rn - (rm shifted). 32-bit T2 encoding.
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }

    // CMP (register): 16-bit T1 encoding when both registers are low.
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
    {
        if ((rn | rm) & 8)
            cmp(rn, rm, ShiftTypeAndAmount());
        else
            m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
    }
1068
    // xor is not spelled with an 'e'. :-(
    // EOR (immediate): rd = rn ^ imm.
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
    }

    // xor is not spelled with an 'e'. :-(
    // EOR (register, shifted): rd = rn ^ (rm shifted). 32-bit T2 encoding.
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // xor is not spelled with an 'e'. :-(
    // EOR (register): 16-bit T1 encoding when rd aliases an operand and all
    // registers are low; EOR is commutative so the operands may swap.
    void eor(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
        else
            eor(rd, rn, rm, ShiftTypeAndAmount());
    }
1097
    // IT: begin an if-then block predicating the next instruction on cond.
    ALWAYS_INLINE void it(Condition cond)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
    }

    // ITx: predicate the next two instructions; inst2if selects then/else for the second.
    ALWAYS_INLINE void it(Condition cond, bool inst2if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
    }

    // ITxy: predicate the next three instructions.
    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
    }

    // ITxyz: predicate the next four instructions (the maximum for an IT block).
    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
    }
1117
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // LDR (immediate): rt = MEM[rn + imm], choosing the shortest encoding —
    // T1 for low regs with a word-aligned 7-bit offset, T2 for SP-relative
    // loads, T3 otherwise.
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
    }

    // LDR (immediate) always using the 32-bit T3 encoding — produces a
    // fixed-size instruction regardless of the offset value.
    ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
    {
        ASSERT(rn != ARMRegisters::pc);
        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
    }

    // LDR (immediate) restricted to the 16-bit T1 encoding — caller must
    // guarantee low registers and a word-aligned 7-bit offset.
    ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt7());
        ASSERT(!((rt | rn) & 8));
        m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
    }
1145
1146    // If index is set, this is a regular offset or a pre-indexed load;
1147    // if index is not set then is is a post-index load.
1148    //
1149    // If wback is set rn is updated - this is a pre or post index load,
1150    // if wback is not set this is a regular offset memory access.
1151    //
1152    // (-255 <= offset <= 255)
1153    // _reg = REG[rn]
1154    // _tmp = _reg + offset
1155    // MEM[index ? _tmp : _reg] = REG[rt]
1156    // if (wback) REG[rn] = _tmp
1157    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1158    {
1159        ASSERT(rt != ARMRegisters::pc);
1160        ASSERT(rn != ARMRegisters::pc);
1161        ASSERT(index || wback);
1162        ASSERT(!wback | (rt != rn));
1163
1164        bool add = true;
1165        if (offset < 0) {
1166            add = false;
1167            offset = -offset;
1168        }
1169        ASSERT((offset & ~0xff) == 0);
1170
1171        offset |= (wback << 8);
1172        offset |= (add   << 9);
1173        offset |= (index << 10);
1174        offset |= (1 << 11);
1175
1176        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
1177    }
1178
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // LDR (register): rt = MEM[rn + (rm << shift)].
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // LDRH (immediate): load halfword. T1 requires low regs and a halfword-aligned 6-bit offset.
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt6())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
    }
1203
1204    // If index is set, this is a regular offset or a pre-indexed load;
1205    // if index is not set then is is a post-index load.
1206    //
1207    // If wback is set rn is updated - this is a pre or post index load,
1208    // if wback is not set this is a regular offset memory access.
1209    //
1210    // (-255 <= offset <= 255)
1211    // _reg = REG[rn]
1212    // _tmp = _reg + offset
1213    // MEM[index ? _tmp : _reg] = REG[rt]
1214    // if (wback) REG[rn] = _tmp
1215    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1216    {
1217        ASSERT(rt != ARMRegisters::pc);
1218        ASSERT(rn != ARMRegisters::pc);
1219        ASSERT(index || wback);
1220        ASSERT(!wback | (rt != rn));
1221
1222        bool add = true;
1223        if (offset < 0) {
1224            add = false;
1225            offset = -offset;
1226        }
1227        ASSERT((offset & ~0xff) == 0);
1228
1229        offset |= (wback << 8);
1230        offset |= (add   << 9);
1231        offset |= (index << 10);
1232        offset |= (1 << 11);
1233
1234        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
1235    }
1236
    // LDRH (register): load halfword from rn + (rm << shift).
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(!BadReg(rt));   // Memory hint
        ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // LDRB (immediate): load byte. The T1 offset is byte-granular, so no alignment shift.
    void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt5())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
    }
1260
1261    void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1262    {
1263        ASSERT(rt != ARMRegisters::pc);
1264        ASSERT(rn != ARMRegisters::pc);
1265        ASSERT(index || wback);
1266        ASSERT(!wback | (rt != rn));
1267
1268        bool add = true;
1269        if (offset < 0) {
1270            add = false;
1271            offset = -offset;
1272        }
1273
1274        ASSERT(!(offset & ~0xff));
1275
1276        offset |= (wback << 8);
1277        offset |= (add   << 9);
1278        offset |= (index << 10);
1279        offset |= (1 << 11);
1280
1281        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
1282    }
1283
    // LDRB (register): load byte from rn + (rm << shift).
    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // LDRSB (register): load byte from rn + (rm << shift), sign-extending to 32 bits.
    void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // LDRSH (register): load halfword from rn + (rm << shift), sign-extending to 32 bits.
    void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1319
    // LSL (immediate): rd = rm << shiftAmount.
    void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // LSL (register): rd = rn << rm.
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }

    // LSR (immediate): rd = rm >> shiftAmount (logical).
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // LSR (register): rd = rn >> rm (logical).
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1351
    // MOVW (T3 encoding): rd = 16-bit immediate, zero-extended. Only used when
    // the value is not expressible as a modified (encoded) immediate.
    ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!imm.isEncodedImm());
        ASSERT(!BadReg(rd));

        // imm4 carries the top nibble of the 16-bit value into the opcode's imm4 field.
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
    }
1360
1361#if OS(LINUX)
    // Rewrites five halfwords at instructionStart back to the canonical
    // movw/movt (loading imm into 'right') followed by a cmp against 'left',
    // then flushes the instruction cache over the patched range.
    static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
    {
        uint16_t* address = static_cast<uint16_t*>(instructionStart);
        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
        // movw right, #lo16; movt right, #hi16; cmp left, right
        address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
        address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
        address[4] = OP_CMP_reg_T2 | left;
        cacheFlush(address, sizeof(uint16_t) * 5);
    }
1374#else
    // Rewrites two halfwords at instructionStart back to a movw (T3) loading
    // imm into rd, then flushes the instruction cache over the patched range.
    static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!imm.isEncodedImm());
        ASSERT(!BadReg(rd));

        uint16_t* address = static_cast<uint16_t*>(instructionStart);
        address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
        address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
        cacheFlush(address, sizeof(uint16_t) * 2);
    }
1386#endif
1387
    // MOV (immediate): rd = imm, choosing T1 (16-bit), T2 (encoded immediate)
    // or T3 (plain 16-bit literal) as the value allows.
    ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!BadReg(rd));

        if ((rd < 8) && imm.isUInt8())
            m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
        else if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
        else
            movT3(rd, imm);
    }

    // MOV (register): rd = rm. T1 handles high registers, including SP.
    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
    {
        m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
    }

    // MOVT: write imm into the top halfword of rd, leaving the bottom halfword intact.
    ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isUInt16());
        ASSERT(!BadReg(rd));
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
    }
1412
    // MVN (immediate): rd = ~imm.
    ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isEncodedImm());
        ASSERT(!BadReg(rd));

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
    }

    // MVN (register, shifted): rd = ~(rm shifted). 32-bit T2 encoding.
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // MVN (register): rd = ~rm, preferring the 16-bit T1 encoding for low registers.
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
    {
        if (!((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
        else
            mvn(rd, rm, ShiftTypeAndAmount());
    }

    // NEG: rd = -rm, implemented as a reverse-subtract from zero.
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
    {
        ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
        sub(rd, zero, rm);
    }
1441
    // ORR (immediate): rd = rn | imm.
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
    }

    // ORR (register, shifted): rd = rn | (rm shifted). 32-bit T2 encoding.
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // ORR (register): 16-bit T1 encoding when rd aliases an operand and all
    // registers are low; ORR is commutative so the operands may swap.
    void orr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
        else
            orr(rd, rn, rm, ShiftTypeAndAmount());
    }

    // ORR (register, shifted), flag-setting variant.
    ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // ORR (register), flag-setting variant.
    // NOTE(review): the 16-bit T1 encoding sets flags only when executed
    // outside an IT block — callers presumably guarantee that; confirm.
    void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
        else
            orr_S(rd, rn, rm, ShiftTypeAndAmount());
    }
1485
    // ROR (immediate): rd = rm rotated right by shiftAmount.
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }

    // ROR (register): rd = rn rotated right by rm.
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1501
    // POP a single register. Low registers use the 16-bit POP encoding; high
    // registers fall back to a post-indexed load from SP with writeback.
    ALWAYS_INLINE void pop(RegisterID dest)
    {
        if (dest < ARMRegisters::r8)
            m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
        else {
            // Load postindexed with writeback.
            ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
        }
    }

    // POP a register list (bitmask, bit i == register i). Must name at least
    // two registers; PC and LR may not both be popped, and SP never can.
    ALWAYS_INLINE void pop(uint32_t registerList)
    {
        ASSERT(WTF::bitCount(registerList) > 1);
        ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
        ASSERT(!((1 << ARMRegisters::sp) & registerList));
        m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
    }
1519
1520    ALWAYS_INLINE void push(RegisterID src)
1521    {
1522        if (src < ARMRegisters::r8)
1523            m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
1524        else if (src == ARMRegisters::lr)
1525            m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
1526        else {
1527            // Store preindexed with writeback.
1528            str(src, ARMRegisters::sp, -sizeof(void*), true, true);
1529        }
1530    }
1531
    // PUSH a register list (bitmask, bit i == register i). Must name at least
    // two registers; PC and SP can never be pushed with this encoding.
    ALWAYS_INLINE void push(uint32_t registerList)
    {
        ASSERT(WTF::bitCount(registerList) > 1);
        ASSERT(!((1 << ARMRegisters::pc) & registerList));
        ASSERT(!((1 << ARMRegisters::sp) & registerList));
        m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
    }
1539
1540#if CPU(APPLE_ARMV7S)
    // SDIV: rd = rn / rm (signed). Only available on cores with integer divide
    // (hence the ARMv7s guard); the template parameter pins the operand width.
    template<int datasize>
    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
    }
1550#endif
1551
    // SMULL: rdHi:rdLo = rn * rm (signed 32x32 -> 64-bit multiply).
    ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rdLo));
        ASSERT(!BadReg(rdHi));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        // Distinct destination halves are architecturally required.
        ASSERT(rdLo != rdHi);
        m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
    }
1561
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STR (immediate): MEM[rn + imm] = rt, choosing the shortest encoding —
    // T1 for low regs with a word-aligned 7-bit offset, T2 for SP-relative
    // stores, T3 otherwise.
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
    }
1576
1577    // If index is set, this is a regular offset or a pre-indexed store;
1578    // if index is not set then is is a post-index store.
1579    //
1580    // If wback is set rn is updated - this is a pre or post index store,
1581    // if wback is not set this is a regular offset memory access.
1582    //
1583    // (-255 <= offset <= 255)
1584    // _reg = REG[rn]
1585    // _tmp = _reg + offset
1586    // MEM[index ? _tmp : _reg] = REG[rt]
1587    // if (wback) REG[rn] = _tmp
1588    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1589    {
1590        ASSERT(rt != ARMRegisters::pc);
1591        ASSERT(rn != ARMRegisters::pc);
1592        ASSERT(index || wback);
1593        ASSERT(!wback | (rt != rn));
1594
1595        bool add = true;
1596        if (offset < 0) {
1597            add = false;
1598            offset = -offset;
1599        }
1600        ASSERT((offset & ~0xff) == 0);
1601
1602        offset |= (wback << 8);
1603        offset |= (add   << 9);
1604        offset |= (index << 10);
1605        offset |= (1 << 11);
1606
1607        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1608    }
1609
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STR (register): MEM[rn + (rm << shift)] = rt.
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STRB (immediate): store the low byte of rt.
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
    }
1635
1636    // If index is set, this is a regular offset or a pre-indexed store;
1637    // if index is not set then is is a post-index store.
1638    //
1639    // If wback is set rn is updated - this is a pre or post index store,
1640    // if wback is not set this is a regular offset memory access.
1641    //
1642    // (-255 <= offset <= 255)
1643    // _reg = REG[rn]
1644    // _tmp = _reg + offset
1645    // MEM[index ? _tmp : _reg] = REG[rt]
1646    // if (wback) REG[rn] = _tmp
1647    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1648    {
1649        ASSERT(rt != ARMRegisters::pc);
1650        ASSERT(rn != ARMRegisters::pc);
1651        ASSERT(index || wback);
1652        ASSERT(!wback | (rt != rn));
1653
1654        bool add = true;
1655        if (offset < 0) {
1656            add = false;
1657            offset = -offset;
1658        }
1659        ASSERT((offset & ~0xff) == 0);
1660
1661        offset |= (wback << 8);
1662        offset |= (add   << 9);
1663        offset |= (index << 10);
1664        offset |= (1 << 11);
1665
1666        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
1667    }
1668
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STRB (register): store the low byte of rt at rn + (rm << shift).
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }

    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STRH (immediate): store the low halfword of rt.
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
    }
1694
1695    // If index is set, this is a regular offset or a pre-indexed store;
1696    // if index is not set then is is a post-index store.
1697    //
1698    // If wback is set rn is updated - this is a pre or post index store,
1699    // if wback is not set this is a regular offset memory access.
1700    //
1701    // (-255 <= offset <= 255)
1702    // _reg = REG[rn]
1703    // _tmp = _reg + offset
1704    // MEM[index ? _tmp : _reg] = REG[rt]
1705    // if (wback) REG[rn] = _tmp
1706    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1707    {
1708        ASSERT(rt != ARMRegisters::pc);
1709        ASSERT(rn != ARMRegisters::pc);
1710        ASSERT(index || wback);
1711        ASSERT(!wback | (rt != rn));
1712
1713        bool add = true;
1714        if (offset < 0) {
1715            add = false;
1716            offset = -offset;
1717        }
1718        ASSERT(!(offset & ~0xff));
1719
1720        offset |= (wback << 8);
1721        offset |= (add   << 9);
1722        offset |= (index << 10);
1723        offset |= (1 << 11);
1724
1725        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
1726    }
1727
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STRH (register): store the low halfword of rt at rn + (rm << shift).
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1740
    // rd = rn - imm. Selects the narrowest encoding that fits the operands.
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            // SUB SP, SP, #imm - the immediate is stored word-scaled (>> 2).
            ASSERT(!(imm.getUInt16() & 3));
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
            return;
        } else if (!((rd | rn) & 8)) {
            // Both registers are low (r0-r7): try the 16-bit encodings.
            if (imm.isUInt3()) {
                // T1 packs the 3-bit immediate into the Rm slot of the formatter.
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        // 32-bit fallback: T3 for Thumb-expandable immediates, T4 (SUBW) for
        // arbitrary 12-bit immediates.
        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
        }
    }
1770
    // rd = imm - rn (reverse subtract; emitted as RSB).
    ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        // RSB_T1 ("negate") only encodes an immediate of zero, low registers only.
        if (!((rd | rn) & 8) && !imm.getUInt12())
            m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
    }
1783
    // rd = rn - (rm shifted) (SUB.W, T2 register encoding).
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1792
1793    // NOTE: In an IT block, add doesn't modify the flags register.
1794    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
1795    {
1796        if (!((rd | rn | rm) & 8))
1797            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1798        else
1799            sub(rd, rn, rm, ShiftTypeAndAmount());
1800    }
1801
    // rd = rn - imm, setting flags (SUBS). Not allowed in an IT (if then) block.
    void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            // SUB SP, SP, #imm - the immediate is stored word-scaled (>> 2).
            ASSERT(!(imm.getUInt16() & 3));
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
            return;
        } else if (!((rd | rn) & 8)) {
            // Both registers low: try the 16-bit encodings.
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
    }
1827
    // rd = imm - rn, setting flags (reverse subtract; emitted as RSBS).
    ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
    }
1837
    // rd = rn - (rm shifted), setting flags (SUBS.W, T2 register encoding).
    // Not allowed in an IT (if then) block?
    ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1847
1848    // Not allowed in an IT (if then) block.
1849    ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1850    {
1851        if (!((rd | rn | rm) & 8))
1852            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1853        else
1854            sub_S(rd, rn, rm, ShiftTypeAndAmount());
1855    }
1856
    // TST rn, #imm - set flags from rn AND imm, discarding the result.
    ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());

        // Rd field is 0xf (result discarded).
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
    }
1864
    // TST rn, rm (shifted) - set flags from rn AND (rm shifted).
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }
1871
1872    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
1873    {
1874        if ((rn | rm) & 8)
1875            tst(rn, rm, ShiftTypeAndAmount());
1876        else
1877            m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
1878    }
1879
    // Unsigned bitfield extract: rd = (rn >> lsb) & ((1 << width) - 1).
    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
    {
        ASSERT(lsb < 32);
        ASSERT((width >= 1) && (width <= 32));
        ASSERT((lsb + width) <= 32);
        // lsb is split across imm3 (bits 12-14) and imm2 (bits 6-7); width is
        // encoded as width - 1 in the low five bits.
        m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
    }
1887
#if CPU(APPLE_ARMV7S)
    // Unsigned integer divide: rd = rn / rm (hardware divide, ARMv7s only).
    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
    }
#endif
1897
    // VFP: rd = rn + rm (double-precision add).
    void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
    }

    // VFP: compare rd with rm (flags read back via vmrs).
    void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
    }

    // VFP: compare rd against zero.
    void vcmpz(FPDoubleRegisterID rd)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
    }

    // VFP: convert a signed integer (held in a single register) to double.
    void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
    }

    // VFP: convert a double to a signed integer, rounding toward zero.
    void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
    }

    // VFP: convert a double to an unsigned integer, rounding toward zero.
    void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // boolean values are 64bit (toInt, unsigned, roundZero)
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
    }

    // VFP: rd = rn / rm (double-precision divide).
    void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
    }

    // VFP: load a double from [rn + imm].
    void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
    }

    // VFP: load a single from [rn + imm].
    void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
    }
1945
    // Move a single-precision register to a core register.
    void vmov(RegisterID rd, FPSingleRegisterID rn)
    {
        ASSERT(!BadReg(rd));
        m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
    }

    // Move a core register to a single-precision register.
    void vmov(FPSingleRegisterID rd, RegisterID rn)
    {
        ASSERT(!BadReg(rn));
        m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
    }

    // Move a double-precision register to a pair of core registers.
    void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
    {
        ASSERT(!BadReg(rd1));
        ASSERT(!BadReg(rd2));
        // '| 16' sets bit 4 of the operand, the separately-encoded bit (see VFPOperand).
        m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
    }

    // Move a pair of core registers to a double-precision register.
    void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
    {
        ASSERT(!BadReg(rn1));
        ASSERT(!BadReg(rn2));
        m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
    }

    // Copy one double-precision register to another.
    void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
    {
        m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
    }

    // Transfer FPSCR status; with the default reg == pc the VFP comparison flags
    // are copied into the APSR condition flags (standard VMRS APSR_nzcv form).
    void vmrs(RegisterID reg = ARMRegisters::pc)
    {
        ASSERT(reg != ARMRegisters::sp);
        m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
    }
1982
    // VFP: rd = rn * rm (double-precision multiply).
    void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
    }

    // VFP: store a double to [rn + imm].
    void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
    }

    // VFP: store a single to [rn + imm].
    void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
    }

    // VFP: rd = rn - rm (double-precision subtract).
    void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
    }

    // VFP: rd = |rm|.
    void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
    }

    // VFP: rd = -rm.
    void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
    }

    // VFP: rd = sqrt(rm).
    void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
    }

    // VFP: convert single to double.
    void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
    }

    // VFP: convert double to single.
    void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
    }
2027
    // 16-bit no-op.
    void nop()
    {
        m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
    }

    // 32-bit (wide) no-op; used where 4-byte padding is needed (see label()).
    void nopw()
    {
        m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
    }

    // Full-system data memory barrier (DMB SY).
    void dmbSY()
    {
        m_formatter.twoWordOp16Op16(OP_DMB_SY_T2a, OP_DMB_SY_T2b);
    }
2042
    // Raw buffer position; does not pad past a pending watchpoint region.
    AssemblerLabel labelIgnoringWatchpoints()
    {
        return m_formatter.label();
    }

    // Label that begins a watchpoint: records where it starts and where the space
    // reserved for jump replacement ends, so later labels can be padded clear of it.
    AssemblerLabel labelForWatchpoint()
    {
        AssemblerLabel result = m_formatter.label();
        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
            result = label();
        m_indexOfLastWatchpoint = result.m_offset;
        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
        return result;
    }

    // Label for general use: pads with nops until clear of the tail of the last
    // watchpoint, so patching the watchpoint cannot clobber code at this label.
    AssemblerLabel label()
    {
        AssemblerLabel result = m_formatter.label();
        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
            // Prefer the 4-byte wide nop when at least 4 bytes remain.
            if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
                nopw();
            else
                nop();
            result = m_formatter.label();
        }
        return result;
    }

    // Pad to 'alignment' with breakpoints (traps if ever executed), then label.
    AssemblerLabel align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            bkpt();

        return label();
    }
2078
    // Translate a buffer-relative label into an absolute address within 'code'.
    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    // Byte distance from label a to label b (positive if b is later).
    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }

    // Bytes saved by emitting 'jumpLinkType' instead of the worst case for 'jumpType'.
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
2091
2092    // Assembler admin methods:
2093
    // Orders link records by source offset (used to sort m_jumpsToLink).
    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }

    // True if the branch-shortening pass may shrink this jump.
    static bool canCompact(JumpType jumpType)
    {
        // The following cannot be compacted:
        //   JumpFixed: represents custom jump sequence
        //   JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
        //   JumpConditionFixedSize: represents conditional jump that must remain a fixed size
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
    }
2107
    // Choose the shortest branch encoding that can reach 'to'. 'from' points just
    // past the space reserved for the jump; each candidate's location is computed
    // by backing off from it by the padding minus the candidate's own size.
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        if (jumpType == JumpFixed)
            return LinkInvalid;

        // for patchable jump we must leave space for the longest code sequence
        if (jumpType == JumpNoConditionFixedSize)
            return LinkBX;
        if (jumpType == JumpConditionFixedSize)
            return LinkConditionalBX;

        const int paddingSize = JUMP_ENUM_SIZE(jumpType);

        if (jumpType == JumpCondition) {
            // 2-byte conditional T1
            const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
            if (canBeJumpT1(jumpT1Location, to))
                return LinkJumpT1;
            // 4-byte conditional T3
            const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
            if (canBeJumpT3(jumpT3Location, to))
                return LinkJumpT3;
            // 4-byte conditional T4 with IT
            const uint16_t* conditionalJumpT4Location =
            reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
            if (canBeJumpT4(conditionalJumpT4Location, to))
                return LinkConditionalJumpT4;
        } else {
            // 2-byte unconditional T2
            const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
            if (canBeJumpT2(jumpT2Location, to))
                return LinkJumpT2;
            // 4-byte unconditional T4
            const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
            if (canBeJumpT4(jumpT4Location, to))
                return LinkJumpT4;
            // use long jump sequence
            return LinkBX;
        }

        // Conditional jump that fits none of the short forms: long conditional sequence.
        ASSERT(jumpType == JumpCondition);
        return LinkConditionalBX;
    }
2151
    // Compute the jump type for a record and remember the choice on the record.
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }

    // Jump records sorted by source offset, ready for the linking pass.
    // NOTE(review): relies on <algorithm> being available transitively for std::sort.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }
2164
    // Patch the branch at 'from' to target 'to', using the encoding previously
    // chosen by computeJumpType and stored on the record.
    static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpT1:
            linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkJumpT2:
            linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkJumpT3:
            linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkJumpT4:
            linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkConditionalJumpT4:
            linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkConditionalBX:
            linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkBX:
            linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        default:
            // LinkInvalid and any unknown type are programming errors.
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
2194
    // Raw (not yet linked) code buffer, and its size in bytes.
    void* unlinkedCode() { return m_formatter.data(); }
    size_t codeSize() const { return m_formatter.codeSize(); }

    // Offset used as the call's return address; assumes 'call' labels the end of
    // the call sequence (matches linkCall, which patches backwards from here).
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }
2203
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.
2211
    // Record an intra-buffer jump to be resolved later by the linking pass.
    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
    }

    // Resolve a jump in finalized (still writable) code to an absolute target.
    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
        linkJumpAbsolute(location, to);
    }

    // Point the constant-materialization sequence before 'from' at call target 'to'.
    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
        ASSERT(from.isSet());

        // '- 1' steps back over the final instruction halfword (presumably the
        // branch itself); setPointer then rewrites the movw/movt pair before it.
        setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
    }

    // Write a pointer constant at 'where'; no cache flush (code not yet executing).
    static void linkPointer(void* code, AssemblerLabel where, void* value)
    {
        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
    }
2239
    // Repoint a jump in live code, then flush the icache over the patched range.
    static void relinkJump(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));

        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);

        // linkJumpAbsolute may rewrite up to five halfwords ending at 'from'.
        cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
    }

    // Repoint a call in live code; setPointer performs the cache flush.
    static void relinkCall(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));

        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
    }

    // Read back the target a call was linked to (inverse of relinkCall).
    static void* readCallTarget(void* from)
    {
        return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
    }
2261
    // Patch the 32-bit constant (movw/movt pair ending at 'where') in live code.
    static void repatchInt32(void* where, int32_t value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setInt32(where, value, true);
    }

    // Patch the offset of a compact (imm8 T3-style) memory access at 'where'.
    static void repatchCompact(void* where, int32_t offset)
    {
        ASSERT(offset >= -255 && offset <= 255);

        // Split the signed offset into a magnitude and an up/down (U) bit.
        bool add = true;
        if (offset < 0) {
            add = false;
            offset = -offset;
        }

        // imm8 plus U (bit 9) and the P/index bits; W (writeback, bit 8) stays clear.
        offset |= (add << 9);
        offset |= (1 << 10);
        offset |= (1 << 11);

        uint16_t* location = reinterpret_cast<uint16_t*>(where);
        // Replace the low 12 bits of the second halfword with the new offset field.
        location[1] &= ~((1 << 12) - 1);
        location[1] |= offset;
        cacheFlush(location, sizeof(uint16_t) * 2);
    }

    // Patch a pointer constant in live code (flushes the cache).
    static void repatchPointer(void* where, void* value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setPointer(where, value, true);
    }

    // Read a pointer constant materialized by the movw/movt pair ending at 'where'.
    static void* readPointer(void* where)
    {
        return reinterpret_cast<void*>(readInt32(where));
    }
2300
    // Overwrite the instruction(s) at instructionStart with a jump to 'to'.
    // The region patched never exceeds maxJumpReplacementSize() bytes.
    static void replaceWithJump(void* instructionStart, void* to)
    {
        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
        ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));

#if OS(LINUX)
        // On Linux the reserved region is large enough for the long (BX) sequence,
        // used when the target is out of B.W (T4) range.
        if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
            uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
            linkJumpT4(ptr, to);
            cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
        } else {
            uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
            linkBX(ptr, to);
            cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
        }
#else
        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
        linkJumpT4(ptr, to);
        cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
#endif
    }

    // Largest number of bytes replaceWithJump may overwrite (must stay in sync with it).
    static ptrdiff_t maxJumpReplacementSize()
    {
#if OS(LINUX)
        return 10;
#else
        return 4;
#endif
    }
2331
    // Convert an ADD-immediate (address computation) at instructionStart into the
    // corresponding LDR-immediate. No-op if it is already the LDR form.
    static void replaceWithLoad(void* instructionStart)
    {
        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
        switch (ptr[0] & 0xFFF0) {
        case OP_LDR_imm_T3:
            break;
        case OP_ADD_imm_T3:
            ASSERT(!(ptr[1] & 0xF000));
            // Swap the opcode, then move the destination register field from
            // bits 8-11 (ADD Rd) to bits 12-15 (LDR Rt) of the second halfword.
            ptr[0] &= 0x000F;
            ptr[0] |= OP_LDR_imm_T3;
            ptr[1] |= (ptr[1] & 0x0F00) << 4;
            ptr[1] &= 0xF0FF;
            cacheFlush(ptr, sizeof(uint16_t) * 2);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    // Inverse of replaceWithLoad: convert an LDR-immediate into an ADD-immediate.
    static void replaceWithAddressComputation(void* instructionStart)
    {
        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
        switch (ptr[0] & 0xFFF0) {
        case OP_LDR_imm_T3:
            ASSERT(!(ptr[1] & 0x0F00));
            // Swap the opcode, then move the destination register field from
            // bits 12-15 (LDR Rt) to bits 8-11 (ADD Rd) of the second halfword.
            ptr[0] &= 0x000F;
            ptr[0] |= OP_ADD_imm_T3;
            ptr[1] |= (ptr[1] & 0xF000) >> 4;
            ptr[1] &= 0x0FFF;
            cacheFlush(ptr, sizeof(uint16_t) * 2);
            break;
        case OP_ADD_imm_T3:
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
2371
    // Current write position in the buffer, for debugging.
    unsigned debugOffset() { return m_formatter.debugOffset(); }

#if OS(LINUX)
    // Invoke the ARM cacheflush syscall for [begin, end): r7 is loaded with
    // 0x000f0002 (movt 0xf / movw 0x2) and preserved across the call.
    static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
    {
        asm volatile(
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "movs    r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (begin), "r" (end)
            : "r0", "r1", "r2");
    }
#endif
2391
    // Synchronize the instruction cache with freshly written code in [code, code + size).
    static void cacheFlush(void* code, size_t size)
    {
#if OS(IOS)
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#elif OS(LINUX)
        // Flush one page at a time - presumably because the syscall must not span
        // distinct mappings (see linuxPageFlush); TODO confirm.
        size_t page = pageSize();
        uintptr_t current = reinterpret_cast<uintptr_t>(code);
        uintptr_t end = current + size;
        uintptr_t firstPageEnd = (current & ~(page - 1)) + page;

        if (end <= firstPageEnd) {
            linuxPageFlush(current, end);
            return;
        }

        linuxPageFlush(current, firstPageEnd);

        for (current = firstPageEnd; current + page < end; current += page)
            linuxPageFlush(current, current + page);

        linuxPageFlush(current, end);
#elif OS(WINCE)
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
#else
#error "The cacheFlush support is missing on this platform."
#endif
    }
2419
2420private:
    // VFP operations commonly take one or more 5-bit operands, typically representing a
    // floating point register number.  This will commonly be encoded in the instruction
    // in two parts, with one single bit field, and one 4-bit field.  In the case of
    // double precision operands the high bit of the register number will be encoded
    // separately; for single precision operands it is the low bit of the register
    // number that is encoded separately.
    // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
    // field to be encoded together in the instruction (the low 4-bits of a double
    // register number, or the high 4-bits of a single register number), and bit 4
    // contains the bit value to be encoded individually.
    struct VFPOperand {
        // Raw 5-bit operand value.
        explicit VFPOperand(uint32_t value)
            : m_value(value)
        {
            ASSERT(!(m_value & ~0x1f));
        }

        // Double register: the register number is used directly.
        VFPOperand(FPDoubleRegisterID reg)
            : m_value(reg)
        {
        }

        // Core register (for register-transfer instruction forms).
        VFPOperand(RegisterID reg)
            : m_value(reg)
        {
        }

        // Single register: the low bit becomes the separately-encoded bit.
        VFPOperand(FPSingleRegisterID reg)
            : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
        {
        }

        // The single bit encoded on its own in the instruction.
        uint32_t bits1()
        {
            return m_value >> 4;
        }

        // The 4-bit field encoded together in the instruction.
        uint32_t bits4()
        {
            return m_value & 0xf;
        }

        uint32_t m_value;
    };
2465
    // Build the VCVT operand selecting conversion direction, signedness and rounding.
    VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
    {
        // Cannot specify rounding when converting to float.
        ASSERT(toInteger || !isRoundZero);

        uint32_t op = 0x8;
        if (toInteger) {
            // opc2 indicates both toInteger & isUnsigned.
            op |= isUnsigned ? 0x4 : 0x5;
            // 'op' field in instruction is isRoundZero
            if (isRoundZero)
                op |= 0x10;
        } else {
            ASSERT(!isRoundZero);
            // 'op' field in instruction is isUnsigned
            if (!isUnsigned)
                op |= 0x10;
        }
        return VFPOperand(op);
    }
2486
    // Rewrite the movw/movt pair that ends at 'code' to materialize 'value',
    // preserving the destination register already encoded in the instructions.
    static void setInt32(void* code, uint32_t value, bool flush)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
        // (location[-N] >> 8) & 0xf re-extracts the destination register field
        // from the existing second halfword of each instruction.
        location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
        location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);

        if (flush)
            cacheFlush(location - 4, 4 * sizeof(uint16_t));
    }

    // Decode the 32-bit constant materialized by the movw/movt pair ending at 'code'.
    static int32_t readInt32(void* code)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16;
        ARMThumbImmediate hi16;
        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
        uint32_t result = hi16.asUInt16();
        result <<= 16;
        result |= lo16.asUInt16();
        return static_cast<int32_t>(result);
    }
2519
    // Patch the word-scaled imm5 field (bits 6-10) of an LDR_imm_T1 at 'code'.
    static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
    {
        // Requires us to have planted a LDR_imm_T1
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt7());
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
        location[0] |= (imm.getUInt7() >> 2) << 6;
        cacheFlush(location, sizeof(uint16_t));
    }

    // Write a pointer as a 32-bit immediate (see setInt32).
    static void setPointer(void* code, void* value, bool flush)
    {
        setInt32(code, reinterpret_cast<uint32_t>(value), flush);
    }
2535
    // True if 'address' holds a 32-bit unconditional branch (B.W, T4 encoding).
    static bool isB(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
    }

    // True if 'address' holds a BX instruction (register field masked out).
    static bool isBX(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] & 0xff87) == OP_BX;
    }

    // True if 'address' holds a MOVW (MOV immediate, T3 encoding).
    static bool isMOV_imm_T3(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
    }

    // True if 'address' holds a MOVT.
    static bool isMOVT(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
    }

    // True if 'address' holds a 16-bit nop.
    static bool isNOP_T1(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return instruction[0] == OP_NOP_T1;
    }

    // True if 'address' holds a 32-bit (wide) nop.
    static bool isNOP_T2(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
    }
2571
    // True if 'target' is reachable from a T1 conditional branch at 'instruction'.
    // The shift pair checks the displacement fits the encoding's signed field
    // (9 bits here, assuming 32-bit intptr_t on ARM).
    static bool canBeJumpT1(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        return ((relative << 23) >> 23) == relative;
    }

    // True if 'target' is reachable from a T2 unconditional branch (signed 12-bit field).
    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        return ((relative << 20) >> 20) == relative;
    }

    // True if 'target' is reachable from a T3 conditional branch (signed 21-bit field).
    static bool canBeJumpT3(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        return ((relative << 11) >> 11) == relative;
    }

    // True if 'target' is reachable from a T4 branch (signed 25-bit field).
    static bool canBeJumpT4(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        return ((relative << 7) >> 7) == relative;
    }
2615
    static void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
    {
        // Patch the one-halfword conditional branch (B T1 encoding) at
        // instruction[-1] to jump to 'target'. The caller must have checked
        // canBeJumpT1 first.
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT1(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        // Condition goes in bits 8..11; the halved displacement fills the low byte.
        instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
    }
2633
    static void linkJumpT2(uint16_t* instruction, void* target)
    {
        // Patch the one-halfword unconditional branch (B T2 encoding) at
        // instruction[-1] to jump to 'target'. The caller must have checked
        // canBeJumpT2 first.
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT2(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        // The halved displacement fills the low 11 bits of the instruction.
        instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
    }
2651
    static void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
    {
        // Patch the two-halfword conditional branch (B T3 encoding) at
        // instruction[-2..-1] to jump to 'target'. The caller must have
        // checked canBeJumpT3 first.
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT3(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        // First halfword: sign bit (displacement bit 20), condition, and
        // displacement bits 12..17. Second halfword: displacement bits 19 and
        // 18, then the halved low 12 bits.
        instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
        instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
    }
2666
    static void linkJumpT4(uint16_t* instruction, void* target)
    {
        // Patch the two-halfword unconditional branch (B T4 encoding) at
        // instruction[-2..-1] to jump to 'target'. The caller must have
        // checked canBeJumpT4 first.
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT4(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // ARM encoding for the top two bits below the sign bit is 'peculiar':
        // for non-negative displacements, bits 22 and 23 are stored inverted.
        if (relative >= 0)
            relative ^= 0xC00000;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        // First halfword: sign bit (displacement bit 24) and displacement bits
        // 12..21. Second halfword: displacement bits 23 and 22 (as adjusted
        // above), then the halved low 12 bits.
        instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
        instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
    }
2684
    static void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
    {
        // Make a T4 branch conditional: plant an IT (if-then) instruction at
        // instruction[-3] to predicate the two-halfword branch that
        // linkJumpT4 writes at instruction[-2..-1].
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        instruction[-3] = ifThenElse(cond) | OP_IT;
        linkJumpT4(instruction, target);
    }
2694
    static void linkBX(uint16_t* instruction, void* target)
    {
        // Patch an absolute jump into the five halfwords at instruction[-5..-1]:
        // MOVW/MOVT the target address into the scratch register (ip), then BX.
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
        // '+ 1' sets the low bit of the address so BX stays in Thumb state.
        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
        instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
        instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
        instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
    }
2710
    static void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
    {
        // Conditional absolute jump: write the MOVW/MOVT/BX sequence via
        // linkBX, then plant an IT at instruction[-6]. The (cond, true, true)
        // form predicates all three following instructions.
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        linkBX(instruction, target);
        instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
    }
2720
    static void linkJumpAbsolute(uint16_t* instruction, void* target)
    {
        // Relink the five-halfword jump slot ending at 'instruction' to reach
        // 'target': a NOP-padded relative branch when the target is within T4
        // range, otherwise a MOVW/MOVT/BX absolute jump.
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        // The slot must currently hold one of the two shapes this function
        // itself produces: an absolute jump or a NOP-padded relative branch.
        ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
               || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));

        if (canBeJumpT4(instruction, target)) {
            // There may be a better way to fix this, but right now put the NOPs first, since in the
            // case of a conditional branch this will be coming after an ITTT predicating *three*
            // instructions!  Looking backwards to modify the ITTT to an IT is not easy, due to
            // variable width encoding - the previous instruction might *look* like an ITTT but
            // actually be the second half of a 2-word op.
            instruction[-5] = OP_NOP_T1;
            instruction[-4] = OP_NOP_T2a;
            instruction[-3] = OP_NOP_T2b;
            linkJumpT4(instruction, target);
        } else {
            const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
            // '+ 1' sets the low bit of the address so BX stays in Thumb state.
            ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
            ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
            instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
            instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
            instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
            instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
            instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
        }
    }
2751
2752    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
2753    {
2754        return op | (imm.m_value.i << 10) | imm.m_value.imm4;
2755    }
2756
2757    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
2758    {
2759        result.m_value.i = (value >> 10) & 1;
2760        result.m_value.imm4 = value & 15;
2761    }
2762
2763    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
2764    {
2765        return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
2766    }
2767
2768    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
2769    {
2770        result.m_value.imm3 = (value >> 12) & 7;
2771        result.m_value.imm8 = value & 255;
2772    }
2773
    // Low-level emitter: each method packs its operands into one or two 16-bit
    // Thumb-2 halfwords and appends them to the assembler buffer. Method names
    // describe the bit layout of the emitted fields, most-significant first
    // (e.g. oneWordOp5Reg3Imm8 = 5-bit opcode, 3-bit register, 8-bit immediate).
    class ARMInstructionFormatter {
    public:
        // 5-bit op | rd in bits 8..10 | 8-bit immediate.
        ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
        {
            m_buffer.putShort(op | (rd << 8) | imm);
        }

        // 5-bit op | 5-bit immediate in bits 6..10 | two 3-bit registers.
        ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
        }

        // 7-bit op | three 3-bit registers.
        ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
        {
            m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
        }

        // 7-bit op | 9-bit immediate (caller supplies the bits pre-positioned).
        ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        // 8-bit op | 8-bit immediate.
        ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        // 8-bit op with reg2 split: reg2's bit 3 is placed at bit 7 and its
        // low 3 bits at bits 0..2, with reg1 in between (hence '143').
        ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
        }

        // 9-bit op | 7-bit immediate.
        ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        // 10-bit op | two 3-bit registers.
        ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | (reg1 << 3) | reg2);
        }

        // Two halfwords: 12-bit op plus a 4-bit register in the first,
        // four 4-bit fields (pre-packed in 'ff') in the second.
        ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
        {
            m_buffer.putShort(op | reg);
            m_buffer.putShort(ff.m_u.value);
        }

        // Two halfwords: a full 16-bit op, then four 4-bit fields.
        ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
        {
            m_buffer.putShort(op);
            m_buffer.putShort(ff.m_u.value);
        }

        // Two fully-specified 16-bit opcode halfwords.
        ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
        {
            m_buffer.putShort(op1);
            m_buffer.putShort(op2);
        }

        // A 16-bit op followed by a raw 16-bit immediate.
        ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
        {
            m_buffer.putShort(op1);
            m_buffer.putShort(imm);
        }

        // MOVW/MOVT-style encoding: the immediate's fields are spread across
        // both halfwords; its imm4 field is overridden with the caller's value.
        ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
        {
            ARMThumbImmediate newImm = imm;
            newImm.m_value.imm4 = imm4;

            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
        }

        // First halfword: 12-bit op | 4-bit reg1. Second: 4-bit reg2 | 12-bit imm.
        ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
        {
            m_buffer.putShort(op | reg1);
            m_buffer.putShort((reg2 << 12) | imm);
        }

        // First halfword: 12-bit op | 4-bit reg1. Second: imm1 (bits 12..14),
        // reg2 (bits 8..11), imm2 (bits 6..7), imm3 (bits 0..5).
        ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
        {
            m_buffer.putShort(op | reg1);
            m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
        }

        // Formats up instructions of the pattern:
        //    111111111B11aaaa:bbbb222SA2C2cccc
        // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
        // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
        ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
        {
            // The asserts verify that no operand bit positions collide with
            // opcode bits.
            ASSERT(!(op1 & 0x004f));
            ASSERT(!(op2 & 0xf1af));
            m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
            m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
        }

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        // (i.e. +/-(0..255) 32-bit words)
        ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
        {
            // Sign-magnitude form: the 'up' bit (bit 7 of the first halfword)
            // selects add vs. subtract of the offset.
            bool up = true;
            if (imm < 0) {
                imm = -imm;
                up = false;
            }

            // The offset must be a multiple of 4 and fit in 8 bits after
            // scaling (word-granular addressing).
            uint32_t offset = imm;
            ASSERT(!(offset & ~0x3fc));
            offset >>= 2;

            m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
            m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
        }

        // Administrative methods: thin pass-throughs to the underlying buffer.

        size_t codeSize() const { return m_buffer.codeSize(); }
        AssemblerLabel label() const { return m_buffer.label(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }

        unsigned debugOffset() { return m_buffer.debugOffset(); }

        AssemblerBuffer m_buffer;
    } m_formatter;
2902
2903    Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
2904    int m_indexOfLastWatchpoint;
2905    int m_indexOfTailOfLastWatchpoint;
2906};
2907
2908} // namespace JSC
2909
2910#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2911
2912#endif // ARMAssembler_h
2913