1/*
2 * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#ifndef ARMAssembler_h
28#define ARMAssembler_h
29
30#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
31
32#include "AssemblerBuffer.h"
33#include <wtf/Assertions.h>
34#include <wtf/Vector.h>
35#include <stdint.h>
36
37namespace JSC {
38
namespace ARMRegisters {
    // Core (general-purpose) registers.  The aliases follow ARM procedure
    // call standard / Thumb conventions noted per line.
    typedef enum {
        r0,
        r1,
        r2,
        r3,
        r4,
        r5,
        r6,
        r7, wr = r7,   // thumb work register
        r8,
        r9, sb = r9,   // static base
        r10, sl = r10, // stack limit
        r11, fp = r11, // frame pointer
        r12, ip = r12,
        r13, sp = r13,
        r14, lr = r14,
        r15, pc = r15,
    } RegisterID;

    // VFP single-precision registers.
    typedef enum {
        s0,
        s1,
        s2,
        s3,
        s4,
        s5,
        s6,
        s7,
        s8,
        s9,
        s10,
        s11,
        s12,
        s13,
        s14,
        s15,
        s16,
        s17,
        s18,
        s19,
        s20,
        s21,
        s22,
        s23,
        s24,
        s25,
        s26,
        s27,
        s28,
        s29,
        s30,
        s31,
    } FPSingleRegisterID;

    // VFP double-precision registers.  d0-d15 alias pairs of single
    // registers; see asSingle()/asDouble() below.
    typedef enum {
        d0,
        d1,
        d2,
        d3,
        d4,
        d5,
        d6,
        d7,
        d8,
        d9,
        d10,
        d11,
        d12,
        d13,
        d14,
        d15,
        d16,
        d17,
        d18,
        d19,
        d20,
        d21,
        d22,
        d23,
        d24,
        d25,
        d26,
        d27,
        d28,
        d29,
        d30,
        d31,
    } FPDoubleRegisterID;

    // Quad (128-bit SIMD) registers.  NOTE(review): the enum extends to q31
    // although the architecture defines q0-q15 — presumably only the low
    // values are ever used; confirm against callers.
    typedef enum {
        q0,
        q1,
        q2,
        q3,
        q4,
        q5,
        q6,
        q7,
        q8,
        q9,
        q10,
        q11,
        q12,
        q13,
        q14,
        q15,
        q16,
        q17,
        q18,
        q19,
        q20,
        q21,
        q22,
        q23,
        q24,
        q25,
        q26,
        q27,
        q28,
        q29,
        q30,
        q31,
    } FPQuadRegisterID;

    // Returns the first single register of the pair aliasing the given
    // double register.  Only valid for d0-d15; d16-d31 have no single alias.
    inline FPSingleRegisterID asSingle(FPDoubleRegisterID reg)
    {
        ASSERT(reg < d16);
        return (FPSingleRegisterID)(reg << 1);
    }

    // Returns the double register aliased by the given (even-numbered)
    // single register.
    inline FPDoubleRegisterID asDouble(FPSingleRegisterID reg)
    {
        ASSERT(!(reg & 1));
        return (FPDoubleRegisterID)(reg >> 1);
    }
}
176
177class ARMv7Assembler;
178class ARMThumbImmediate {
179    friend class ARMv7Assembler;
180
181    typedef uint8_t ThumbImmediateType;
182    static const ThumbImmediateType TypeInvalid = 0;
183    static const ThumbImmediateType TypeEncoded = 1;
184    static const ThumbImmediateType TypeUInt16 = 2;
185
186    typedef union {
187        int16_t asInt;
188        struct {
189            unsigned imm8 : 8;
190            unsigned imm3 : 3;
191            unsigned i    : 1;
192            unsigned imm4 : 4;
193        };
194        // If this is an encoded immediate, then it may describe a shift, or a pattern.
195        struct {
196            unsigned shiftValue7 : 7;
197            unsigned shiftAmount : 5;
198        };
199        struct {
200            unsigned immediate   : 8;
201            unsigned pattern     : 4;
202        };
203    } ThumbImmediateValue;
204
205    // byte0 contains least significant bit; not using an array to make client code endian agnostic.
206    typedef union {
207        int32_t asInt;
208        struct {
209            uint8_t byte0;
210            uint8_t byte1;
211            uint8_t byte2;
212            uint8_t byte3;
213        };
214    } PatternBytes;
215
216    ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
217    {
218        if (value & ~((1 << N) - 1)) /* check for any of the top N bits (of 2N bits) are set */
219            value >>= N;             /* if any were set, lose the bottom N */
220        else                         /* if none of the top N bits are set, */
221            zeros += N;              /* then we have identified N leading zeros */
222    }
223
224    static int32_t countLeadingZeros(uint32_t value)
225    {
226        if (!value)
227            return 32;
228
229        int32_t zeros = 0;
230        countLeadingZerosPartial(value, zeros, 16);
231        countLeadingZerosPartial(value, zeros, 8);
232        countLeadingZerosPartial(value, zeros, 4);
233        countLeadingZerosPartial(value, zeros, 2);
234        countLeadingZerosPartial(value, zeros, 1);
235        return zeros;
236    }
237
238    ARMThumbImmediate()
239        : m_type(TypeInvalid)
240    {
241        m_value.asInt = 0;
242    }
243
244    ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
245        : m_type(type)
246        , m_value(value)
247    {
248    }
249
250    ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
251        : m_type(TypeUInt16)
252    {
253        // Make sure this constructor is only reached with type TypeUInt16;
254        // this extra parameter makes the code a little clearer by making it
255        // explicit at call sites which type is being constructed
256        ASSERT_UNUSED(type, type == TypeUInt16);
257
258        m_value.asInt = value;
259    }
260
261public:
262    static ARMThumbImmediate makeEncodedImm(uint32_t value)
263    {
264        ThumbImmediateValue encoding;
265        encoding.asInt = 0;
266
267        // okay, these are easy.
268        if (value < 256) {
269            encoding.immediate = value;
270            encoding.pattern = 0;
271            return ARMThumbImmediate(TypeEncoded, encoding);
272        }
273
274        int32_t leadingZeros = countLeadingZeros(value);
275        // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
276        ASSERT(leadingZeros < 24);
277
278        // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
279        // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
280        // zero.  count(B) == 8, so the count of bits to be checked is 24 - count(Z).
281        int32_t rightShiftAmount = 24 - leadingZeros;
282        if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
283            // Shift the value down to the low byte position.  The assign to
284            // shiftValue7 drops the implicit top bit.
285            encoding.shiftValue7 = value >> rightShiftAmount;
286            // The endoded shift amount is the magnitude of a right rotate.
287            encoding.shiftAmount = 8 + leadingZeros;
288            return ARMThumbImmediate(TypeEncoded, encoding);
289        }
290
291        PatternBytes bytes;
292        bytes.asInt = value;
293
294        if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
295            encoding.immediate = bytes.byte0;
296            encoding.pattern = 3;
297            return ARMThumbImmediate(TypeEncoded, encoding);
298        }
299
300        if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
301            encoding.immediate = bytes.byte0;
302            encoding.pattern = 1;
303            return ARMThumbImmediate(TypeEncoded, encoding);
304        }
305
306        if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
307            encoding.immediate = bytes.byte1;
308            encoding.pattern = 2;
309            return ARMThumbImmediate(TypeEncoded, encoding);
310        }
311
312        return ARMThumbImmediate();
313    }
314
315    static ARMThumbImmediate makeUInt12(int32_t value)
316    {
317        return (!(value & 0xfffff000))
318            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
319            : ARMThumbImmediate();
320    }
321
322    static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
323    {
324        // If this is not a 12-bit unsigned it, try making an encoded immediate.
325        return (!(value & 0xfffff000))
326            ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
327            : makeEncodedImm(value);
328    }
329
330    // The 'make' methods, above, return a !isValid() value if the argument
331    // cannot be represented as the requested type.  This methods  is called
332    // 'get' since the argument can always be represented.
333    static ARMThumbImmediate makeUInt16(uint16_t value)
334    {
335        return ARMThumbImmediate(TypeUInt16, value);
336    }
337
338    bool isValid()
339    {
340        return m_type != TypeInvalid;
341    }
342
343    uint16_t asUInt16() const { return m_value.asInt; }
344
345    // These methods rely on the format of encoded byte values.
346    bool isUInt3() { return !(m_value.asInt & 0xfff8); }
347    bool isUInt4() { return !(m_value.asInt & 0xfff0); }
348    bool isUInt5() { return !(m_value.asInt & 0xffe0); }
349    bool isUInt6() { return !(m_value.asInt & 0xffc0); }
350    bool isUInt7() { return !(m_value.asInt & 0xff80); }
351    bool isUInt8() { return !(m_value.asInt & 0xff00); }
352    bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
353    bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
354    bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
355    bool isUInt16() { return m_type == TypeUInt16; }
356    uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
357    uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
358    uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
359    uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
360    uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
361    uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
362    uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
363    uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
364    uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
365    uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
366
367    bool isEncodedImm() { return m_type == TypeEncoded; }
368
369private:
370    ThumbImmediateType m_type;
371    ThumbImmediateValue m_value;
372};
373
// Shift kinds for register-shifted operands, in Thumb-2 encoding order.
// RRX is encoded as ROR with a zero shift amount, so it shares ROR's type.
typedef enum {
    SRType_LSL,
    SRType_LSR,
    SRType_ASR,
    SRType_ROR,

    SRType_RRX = SRType_ROR
} ARMShiftType;

// Packs a 2-bit shift type and a shift amount (taken modulo 32) and exposes
// the pair as two 4-bit halves, as consumed by the instruction formatter.
class ShiftTypeAndAmount {
    friend class ARMv7Assembler;

public:
    // Default: LSL by 0, i.e. no shift.
    ShiftTypeAndAmount()
    {
        m_u.type = (ARMShiftType)0;
        m_u.amount = 0;
    }

    ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
    {
        m_u.type = type;
        m_u.amount = amount & 31; // Shift amounts are modulo 32.
    }

    // Low nibble: type in bits 0-1, amount bits 0-1 in bits 2-3.
    unsigned lo4() const { return m_u.lo4; }
    // High nibble: amount bits 2-5.
    unsigned hi4() const { return m_u.hi4; }

private:
    union {
        struct {
            unsigned lo4 : 4;
            unsigned hi4 : 4;
        };
        struct {
            unsigned type   : 2;
            unsigned amount : 6;
        };
    } m_u;
};
414
415class ARMv7Assembler {
416public:
    typedef ARMRegisters::RegisterID RegisterID;
    typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID;
    typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID;
    typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;

    // Condition codes, in ARM encoding order.  x86 mnemonic equivalents:
    // (HS, LO, HI, LS) -> (AE, B, A, BE)
    // (VS, VC) -> (O, NO)
    typedef enum {
        ConditionEQ, // Zero / Equal.
        ConditionNE, // Non-zero / Not equal.
        ConditionHS, ConditionCS = ConditionHS, // Unsigned higher or same.
        ConditionLO, ConditionCC = ConditionLO, // Unsigned lower.
        ConditionMI, // Negative.
        ConditionPL, // Positive or zero.
        ConditionVS, // Overflowed.
        ConditionVC, // Not overflowed.
        ConditionHI, // Unsigned higher.
        ConditionLS, // Unsigned lower or same.
        ConditionGE, // Signed greater than or equal.
        ConditionLT, // Signed less than.
        ConditionGT, // Signed greater than.
        ConditionLE, // Signed less than or equal.
        ConditionAL, // Unconditional / Always execute.
        ConditionInvalid
    } Condition;
442
// Each jump/link enumerator packs an identifying index into its low three
// bits and the worst-case byte size of the emitted sequence into the rest
// (retrieved with JUMP_ENUM_SIZE).
#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index))
#define JUMP_ENUM_SIZE(jump) ((jump) >> 3)
    // Kind of jump recorded at assembly time, before linking picks an encoding.
    enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0),
                    JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)),
                    JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)),
                    JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)),
                    JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t))
    };
    // Concrete branch sequence chosen when the jump is linked (T1-T4 are the
    // Thumb B encodings; the BX variants branch via a register).
    enum JumpLinkType {
        LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0),
        LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)),
        LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)),
        LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)),
        LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)),
        LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)),
        LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)),
        LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t))
    };
461
    // Record of a not-yet-linked jump: its source offset, target offset,
    // JumpType, the JumpLinkType selected during linking, and its condition.
    class LinkRecord {
    public:
        LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
        {
            data.realTypes.m_from = from;
            data.realTypes.m_to = to;
            data.realTypes.m_type = type;
            data.realTypes.m_linkType = LinkInvalid;
            data.realTypes.m_condition = condition;
        }
        // Copies via the word overlay rather than field-by-field, so the
        // packed bitfields are moved in three 32-bit loads/stores.
        void operator=(const LinkRecord& other)
        {
            data.copyTypes.content[0] = other.data.copyTypes.content[0];
            data.copyTypes.content[1] = other.data.copyTypes.content[1];
            data.copyTypes.content[2] = other.data.copyTypes.content[2];
        }
        intptr_t from() const { return data.realTypes.m_from; }
        void setFrom(intptr_t from) { data.realTypes.m_from = from; }
        intptr_t to() const { return data.realTypes.m_to; }
        JumpType type() const { return data.realTypes.m_type; }
        JumpLinkType linkType() const { return data.realTypes.m_linkType; }
        // The link type may only be set once (from LinkInvalid).
        void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; }
        Condition condition() const { return data.realTypes.m_condition; }
    private:
        union {
            // Bitfields packed into 12 bytes (asserted below); 31-bit offsets
            // suffice since this backend targets a 32-bit address space.
            struct RealTypes {
                intptr_t m_from : 31;
                intptr_t m_to : 31;
                JumpType m_type : 8;
                JumpLinkType m_linkType : 8;
                Condition m_condition : 16;
            } realTypes;
            struct CopyTypes {
                uint32_t content[3];
            } copyTypes;
            COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct);
        } data;
    };
500
    // INT_MIN is a sentinel meaning no watchpoint has been emitted yet
    // (presumably consumed by watchpoint/label logic later in this file —
    // not visible in this chunk; verify there).
    ARMv7Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
    }
506
507private:
508
509    // ARMv7, Appx-A.6.3
510    static bool BadReg(RegisterID reg)
511    {
512        return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc);
513    }
514
515    uint32_t singleRegisterMask(FPSingleRegisterID rdNum, int highBitsShift, int lowBitShift)
516    {
517        uint32_t rdMask = (rdNum >> 1) << highBitsShift;
518        if (rdNum & 1)
519            rdMask |= 1 << lowBitShift;
520        return rdMask;
521    }
522
523    uint32_t doubleRegisterMask(FPDoubleRegisterID rdNum, int highBitShift, int lowBitsShift)
524    {
525        uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
526        if (rdNum & 16)
527            rdMask |= 1 << highBitShift;
528        return rdMask;
529    }
530
    // 16-bit (single halfword) Thumb instruction base opcodes.  Operand
    // fields are OR'd in by the formatter's oneWordOp* helpers.
    typedef enum {
        OP_ADD_reg_T1       = 0x1800,
        OP_SUB_reg_T1       = 0x1A00,
        OP_ADD_imm_T1       = 0x1C00,
        OP_SUB_imm_T1       = 0x1E00,
        OP_MOV_imm_T1       = 0x2000,
        OP_CMP_imm_T1       = 0x2800,
        OP_ADD_imm_T2       = 0x3000,
        OP_SUB_imm_T2       = 0x3800,
        OP_AND_reg_T1       = 0x4000,
        OP_EOR_reg_T1       = 0x4040,
        OP_TST_reg_T1       = 0x4200,
        OP_RSB_imm_T1       = 0x4240,
        OP_CMP_reg_T1       = 0x4280,
        OP_ORR_reg_T1       = 0x4300,
        OP_MVN_reg_T1       = 0x43C0,
        OP_ADD_reg_T2       = 0x4400,
        OP_MOV_reg_T1       = 0x4600,
        // BLX and BX deliberately share a base value; blx() passes
        // (RegisterID)8 in the high register field to set bit 7.
        OP_BLX              = 0x4700,
        OP_BX               = 0x4700,
        OP_STR_reg_T1       = 0x5000,
        OP_STRH_reg_T1      = 0x5200,
        OP_STRB_reg_T1      = 0x5400,
        OP_LDRSB_reg_T1     = 0x5600,
        OP_LDR_reg_T1       = 0x5800,
        OP_LDRH_reg_T1      = 0x5A00,
        OP_LDRB_reg_T1      = 0x5C00,
        OP_LDRSH_reg_T1     = 0x5E00,
        OP_STR_imm_T1       = 0x6000,
        OP_LDR_imm_T1       = 0x6800,
        OP_STRB_imm_T1      = 0x7000,
        OP_LDRB_imm_T1      = 0x7800,
        OP_STRH_imm_T1      = 0x8000,
        OP_LDRH_imm_T1      = 0x8800,
        OP_STR_imm_T2       = 0x9000,
        OP_LDR_imm_T2       = 0x9800,
        OP_ADD_SP_imm_T1    = 0xA800,
        OP_ADD_SP_imm_T2    = 0xB000,
        OP_SUB_SP_imm_T1    = 0xB080,
        OP_BKPT             = 0xBE00,
        // IT and NOP share the 0xBF00 base; they differ in the operand bits.
        OP_IT               = 0xBF00,
        OP_NOP_T1           = 0xBF00,
    } OpcodeID;
574
    // First halfword of 32-bit Thumb-2 encodings (and the 16-bit branch
    // forms).  Values with an OP_*a suffix pair with an OP_*b second
    // halfword from OpcodeID2.
    typedef enum {
        OP_B_T1         = 0xD000,
        OP_B_T2         = 0xE000,
        OP_AND_reg_T2   = 0xEA00,
        OP_TST_reg_T2   = 0xEA10,
        OP_ORR_reg_T2   = 0xEA40,
        OP_ORR_S_reg_T2 = 0xEA50,
        // The immediate-shift forms all use the MOV(shifted register)
        // encoding with Rn = 0xf; the shift type distinguishes them.
        OP_ASR_imm_T1   = 0xEA4F,
        OP_LSL_imm_T1   = 0xEA4F,
        OP_LSR_imm_T1   = 0xEA4F,
        OP_ROR_imm_T1   = 0xEA4F,
        OP_MVN_reg_T2   = 0xEA6F,
        OP_EOR_reg_T2   = 0xEA80,
        OP_ADD_reg_T3   = 0xEB00,
        OP_ADD_S_reg_T3 = 0xEB10,
        OP_SUB_reg_T2   = 0xEBA0,
        OP_SUB_S_reg_T2 = 0xEBB0,
        OP_CMP_reg_T2   = 0xEBB0,
        OP_VMOV_CtoD    = 0xEC00,
        OP_VMOV_DtoC    = 0xEC10,
        OP_FSTS         = 0xED00,
        OP_VSTR         = 0xED00,
        OP_FLDS         = 0xED10,
        OP_VLDR         = 0xED10,
        OP_VMOV_CtoS    = 0xEE00,
        OP_VMOV_StoC    = 0xEE10,
        OP_VMUL_T2      = 0xEE20,
        OP_VADD_T2      = 0xEE30,
        OP_VSUB_T2      = 0xEE30,
        OP_VDIV         = 0xEE80,
        // Many VFP operations share the 0xEEB0 first halfword and are
        // distinguished by their second halfword and operand fields.
        OP_VABS_T2      = 0xEEB0,
        OP_VCMP         = 0xEEB0,
        OP_VCVT_FPIVFP  = 0xEEB0,
        OP_VMOV_T2      = 0xEEB0,
        OP_VMOV_IMM_T2  = 0xEEB0,
        OP_VMRS         = 0xEEB0,
        OP_VNEG_T2      = 0xEEB0,
        OP_VSQRT_T1     = 0xEEB0,
        OP_VCVTSD_T1    = 0xEEB0,
        OP_VCVTDS_T1    = 0xEEB0,
        OP_B_T3a        = 0xF000,
        OP_B_T4a        = 0xF000,
        OP_AND_imm_T1   = 0xF000,
        OP_TST_imm      = 0xF010,
        OP_ORR_imm_T1   = 0xF040,
        OP_MOV_imm_T2   = 0xF040,
        OP_MVN_imm      = 0xF060,
        OP_EOR_imm_T1   = 0xF080,
        OP_ADD_imm_T3   = 0xF100,
        OP_ADD_S_imm_T3 = 0xF110,
        OP_CMN_imm      = 0xF110,
        OP_ADC_imm      = 0xF140,
        OP_SUB_imm_T3   = 0xF1A0,
        OP_SUB_S_imm_T3 = 0xF1B0,
        OP_CMP_imm_T2   = 0xF1B0,
        OP_RSB_imm_T2   = 0xF1C0,
        OP_RSB_S_imm_T2 = 0xF1D0,
        OP_ADD_imm_T4   = 0xF200,
        OP_MOV_imm_T3   = 0xF240,
        OP_SUB_imm_T4   = 0xF2A0,
        OP_MOVT         = 0xF2C0,
        OP_UBFX_T1      = 0xF3C0,
        OP_NOP_T2a      = 0xF3AF,
        OP_STRB_imm_T3  = 0xF800,
        OP_STRB_reg_T2  = 0xF800,
        OP_LDRB_imm_T3  = 0xF810,
        OP_LDRB_reg_T2  = 0xF810,
        OP_STRH_imm_T3  = 0xF820,
        OP_STRH_reg_T2  = 0xF820,
        OP_LDRH_reg_T2  = 0xF830,
        OP_LDRH_imm_T3  = 0xF830,
        OP_STR_imm_T4   = 0xF840,
        OP_STR_reg_T2   = 0xF840,
        OP_LDR_imm_T4   = 0xF850,
        OP_LDR_reg_T2   = 0xF850,
        OP_STRB_imm_T2  = 0xF880,
        OP_LDRB_imm_T2  = 0xF890,
        OP_STRH_imm_T2  = 0xF8A0,
        OP_LDRH_imm_T2  = 0xF8B0,
        OP_STR_imm_T3   = 0xF8C0,
        OP_LDR_imm_T3   = 0xF8D0,
        OP_LDRSB_reg_T2 = 0xF910,
        OP_LDRSH_reg_T2 = 0xF930,
        OP_LSL_reg_T2   = 0xFA00,
        OP_LSR_reg_T2   = 0xFA20,
        OP_ASR_reg_T2   = 0xFA40,
        OP_ROR_reg_T2   = 0xFA60,
        OP_CLZ          = 0xFAB0,
        OP_SMULL_T1     = 0xFB80,
#if CPU(APPLE_ARMV7S)
        // Hardware integer divide is only available on ARMv7s parts.
        OP_SDIV_T1      = 0xFB90,
        OP_UDIV_T1      = 0xFBB0,
#endif
    } OpcodeID1;
669
    // Second halfword of 32-bit Thumb-2 encodings; each OP_*b value pairs
    // with the same-named OP_* first halfword in OpcodeID1.
    typedef enum {
        OP_VADD_T2b     = 0x0A00,
        OP_VDIVb        = 0x0A00,
        OP_FLDSb        = 0x0A00,
        OP_VLDRb        = 0x0A00,
        OP_VMOV_IMM_T2b = 0x0A00,
        OP_VMOV_T2b     = 0x0A40,
        OP_VMUL_T2b     = 0x0A00,
        OP_FSTSb        = 0x0A00,
        OP_VSTRb        = 0x0A00,
        OP_VMOV_StoCb   = 0x0A10,
        OP_VMOV_CtoSb   = 0x0A10,
        OP_VMOV_DtoCb   = 0x0A10,
        OP_VMOV_CtoDb   = 0x0A10,
        OP_VMRSb        = 0x0A10,
        OP_VABS_T2b     = 0x0A40,
        OP_VCMPb        = 0x0A40,
        OP_VCVT_FPIVFPb = 0x0A40,
        OP_VNEG_T2b     = 0x0A40,
        OP_VSUB_T2b     = 0x0A40,
        OP_VSQRT_T1b    = 0x0A40,
        OP_VCVTSD_T1b   = 0x0A40,
        OP_VCVTDS_T1b   = 0x0A40,
        OP_NOP_T2b      = 0x8000,
        OP_B_T3b        = 0x8000,
        OP_B_T4b        = 0x9000,
    } OpcodeID2;
697
698    struct FourFours {
699        FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
700        {
701            m_u.f0 = f0;
702            m_u.f1 = f1;
703            m_u.f2 = f2;
704            m_u.f3 = f3;
705        }
706
707        union {
708            unsigned value;
709            struct {
710                unsigned f0 : 4;
711                unsigned f1 : 4;
712                unsigned f2 : 4;
713                unsigned f3 : 4;
714            };
715        } m_u;
716    };
717
718    class ARMInstructionFormatter;
719
720    // false means else!
721    bool ifThenElseConditionBit(Condition condition, bool isIf)
722    {
723        return isIf ? (condition & 1) : !(condition & 1);
724    }
725    uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
726    {
727        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
728            | (ifThenElseConditionBit(condition, inst3if) << 2)
729            | (ifThenElseConditionBit(condition, inst4if) << 1)
730            | 1;
731        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
732        return (condition << 4) | mask;
733    }
734    uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
735    {
736        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
737            | (ifThenElseConditionBit(condition, inst3if) << 2)
738            | 2;
739        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
740        return (condition << 4) | mask;
741    }
742    uint8_t ifThenElse(Condition condition, bool inst2if)
743    {
744        int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
745            | 4;
746        ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
747        return (condition << 4) | mask;
748    }
749
750    uint8_t ifThenElse(Condition condition)
751    {
752        int mask = 8;
753        return (condition << 4) | mask;
754    }
755
756public:
757
    // ADC (immediate): rd = rn + imm + carry.  Requires a modified-immediate
    // operand (imm.isEncodedImm()).
    void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm);
    }
768
    // ADD (immediate).  Selects the shortest available encoding: the 16-bit
    // SP-relative forms, then the 16-bit T1/T2 forms for low registers,
    // finally the 32-bit T3 (modified immediate) or T4 (12-bit) forms.
    void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if (rn == ARMRegisters::sp) {
            // The 16-bit SP-relative immediates are stored in words, so the
            // offset must be 4-byte aligned.
            ASSERT(!(imm.getUInt16() & 3));
            if (!(rd & 8) && imm.isUInt10()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
                return;
            } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) {
                m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast<uint8_t>(imm.getUInt9() >> 2));
                return;
            }
        } else if (!((rd | rn) & 8)) {
            // Both registers are low (r0-r7); try the 16-bit encodings.
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        // Fall back to the 32-bit encodings.
        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
        }
    }
803
    // ADD (register, shifted): rd = rn + (rm shifted).  32-bit encoding.
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
812
    // NOTE: In an IT block, add doesn't modify the flags register.
    // ADD (register).  Uses the 16-bit T2 form when rd aliases an operand,
    // the 16-bit T1 form when all registers are low, else the 32-bit form.
    ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if (rd == rn)
            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
        else if (rd == rm)
            m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
        else if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
        else
            add(rd, rn, rm, ShiftTypeAndAmount());
    }
825
    // Not allowed in an IT (if then) block.
    // ADDS (immediate) - as add(), but sets the condition flags.
    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        // Low registers: try the 16-bit encodings first.
        if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
    }
847
    // Not allowed in an IT (if then) block?
    // ADDS (register, shifted): flag-setting variant of the shifted add.
    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
857
    // Not allowed in an IT (if then) block.
    // ADDS (register): 16-bit T1 form when all registers are low, else the
    // 32-bit flag-setting form via the shifted-register overload.
    ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
        else
            add_S(rd, rn, rm, ShiftTypeAndAmount());
    }
866
    // AND (immediate).  Named ARM_and because 'and' is a reserved
    // alternative operator token in C++.
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
    }
874
    // AND (register, shifted): rd = rn & (rm shifted).
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
882
    // AND (register).  The 16-bit T1 form requires rd to alias one operand
    // and all registers low; AND is commutative, so either operand order
    // works.  Otherwise falls back to the 32-bit form.
    ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
        else
            ARM_and(rd, rn, rm, ShiftTypeAndAmount());
    }
892
    // ASR (immediate): rd = rm >> shiftAmount (arithmetic).
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
900
    // ASR (register): rd = rn >> rm (arithmetic).
    ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
908
    // Only allowed in IT (if then) block if last instruction.
    // Emits an unconditional branch (T4, two half-words) with the offset left
    // to be filled in later; returns a label identifying the instruction.
    ALWAYS_INLINE AssemblerLabel b()
    {
        m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
        return m_formatter.label();
    }
915
    // Only allowed in IT (if then) block if last instruction.
    // Branch with link and exchange to the address in rm.
    ALWAYS_INLINE AssemblerLabel blx(RegisterID rm)
    {
        ASSERT(rm != ARMRegisters::pc);
        m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
        return m_formatter.label();
    }
923
    // Only allowed in IT (if then) block if last instruction.
    // Branch and exchange to the address in rm.
    ALWAYS_INLINE AssemblerLabel bx(RegisterID rm)
    {
        m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
        return m_formatter.label();
    }
930
    // Software breakpoint with an 8-bit immediate payload (default 0).
    void bkpt(uint8_t imm = 0)
    {
        m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
    }
935
    // CLZ: rd = count of leading zero bits in rm. Note rm is encoded twice
    // (in both Rm fields), as the encoding requires.
    ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm));
    }
942
    // CMN (immediate): sets flags from rn + imm; the result is discarded
    // (Rd field is 0xf).
    ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
    }
950
951    ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm)
952    {
953        ASSERT(rn != ARMRegisters::pc);
954        ASSERT(imm.isEncodedImm());
955
956        if (!(rn & 8) && imm.isUInt8())
957            m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
958        else
959            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
960    }
961
    // CMP (register) with an optional shift applied to rm; wide T2 encoding.
    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }
968
969    ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm)
970    {
971        if ((rn | rm) & 8)
972            cmp(rn, rm, ShiftTypeAndAmount());
973        else
974            m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
975    }
976
    // xor is not spelled with an 'e'. :-(
    // EOR (immediate): rd = rn ^ imm, wide T1 encoding.
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
    }
985
    // xor is not spelled with an 'e'. :-(
    // EOR (register) with an optional shift applied to rm; wide T2 encoding.
    ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
994
995    // xor is not spelled with an 'e'. :-(
996    void eor(RegisterID rd, RegisterID rn, RegisterID rm)
997    {
998        if ((rd == rn) && !((rd | rm) & 8))
999            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
1000        else if ((rd == rm) && !((rd | rn) & 8))
1001            m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
1002        else
1003            eor(rd, rn, rm, ShiftTypeAndAmount());
1004    }
1005
    // IT: make the single following instruction conditional on cond.
    ALWAYS_INLINE void it(Condition cond)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
    }
1010
    // IT with a 2-instruction block; inst2if selects then (true) or else.
    ALWAYS_INLINE void it(Condition cond, bool inst2if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
    }
1015
    // IT with a 3-instruction block; each bool selects then (true) or else.
    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
    }
1020
    // IT with a 4-instruction block; each bool selects then (true) or else.
    ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
    {
        m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
    }
1025
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // LDR (immediate): load word from [rn + imm]. Chooses the shortest
    // encoding: T1 (low regs, word-aligned 7-bit offset), T2 (SP-relative,
    // word-aligned 10-bit offset), else wide T3 (12-bit offset).
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
    }
1039
    // LDR with a fixed-size wide (T3) encoding regardless of operands —
    // useful when the instruction will be repatched later.
    ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate)
    {
        ASSERT(rn != ARMRegisters::pc);
        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate);
    }
1045
    // LDR restricted to the narrow T1 encoding: low registers and a
    // word-aligned 7-bit offset only.
    ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt7());
        ASSERT(!((rt | rn) & 8));
        m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
    }
1053
1054    // If index is set, this is a regular offset or a pre-indexed load;
1055    // if index is not set then is is a post-index load.
1056    //
1057    // If wback is set rn is updated - this is a pre or post index load,
1058    // if wback is not set this is a regular offset memory access.
1059    //
1060    // (-255 <= offset <= 255)
1061    // _reg = REG[rn]
1062    // _tmp = _reg + offset
1063    // MEM[index ? _tmp : _reg] = REG[rt]
1064    // if (wback) REG[rn] = _tmp
1065    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1066    {
1067        ASSERT(rt != ARMRegisters::pc);
1068        ASSERT(rn != ARMRegisters::pc);
1069        ASSERT(index || wback);
1070        ASSERT(!wback | (rt != rn));
1071
1072        bool add = true;
1073        if (offset < 0) {
1074            add = false;
1075            offset = -offset;
1076        }
1077        ASSERT((offset & ~0xff) == 0);
1078
1079        offset |= (wback << 8);
1080        offset |= (add   << 9);
1081        offset |= (index << 10);
1082        offset |= (1 << 11);
1083
1084        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
1085    }
1086
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // LDR (register): load word from [rn + (rm << shift)]. Narrow T1 encoding
    // when all registers are low and no shift is applied.
    ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1099
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // LDRH (immediate): load halfword from [rn + imm]. Narrow T1 encoding
    // needs low registers and a halfword-aligned 6-bit offset.
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt6())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
    }
1111
1112    // If index is set, this is a regular offset or a pre-indexed load;
1113    // if index is not set then is is a post-index load.
1114    //
1115    // If wback is set rn is updated - this is a pre or post index load,
1116    // if wback is not set this is a regular offset memory access.
1117    //
1118    // (-255 <= offset <= 255)
1119    // _reg = REG[rn]
1120    // _tmp = _reg + offset
1121    // MEM[index ? _tmp : _reg] = REG[rt]
1122    // if (wback) REG[rn] = _tmp
1123    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1124    {
1125        ASSERT(rt != ARMRegisters::pc);
1126        ASSERT(rn != ARMRegisters::pc);
1127        ASSERT(index || wback);
1128        ASSERT(!wback | (rt != rn));
1129
1130        bool add = true;
1131        if (offset < 0) {
1132            add = false;
1133            offset = -offset;
1134        }
1135        ASSERT((offset & ~0xff) == 0);
1136
1137        offset |= (wback << 8);
1138        offset |= (add   << 9);
1139        offset |= (index << 10);
1140        offset |= (1 << 11);
1141
1142        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
1143    }
1144
    // LDRH (register): load halfword from [rn + (rm << shift)].
    ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(!BadReg(rt));   // Memory hint
        ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1157
    // LDRB (immediate): load byte from [rn + imm]. Narrow T1 encoding needs
    // low registers and a 5-bit offset (bytes need no alignment scaling).
    void ldrb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt5())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRB_imm_T1, imm.getUInt5(), rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T2, rn, rt, imm.getUInt12());
    }
1168
1169    void ldrb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1170    {
1171        ASSERT(rt != ARMRegisters::pc);
1172        ASSERT(rn != ARMRegisters::pc);
1173        ASSERT(index || wback);
1174        ASSERT(!wback | (rt != rn));
1175
1176        bool add = true;
1177        if (offset < 0) {
1178            add = false;
1179            offset = -offset;
1180        }
1181
1182        ASSERT(!(offset & ~0xff));
1183
1184        offset |= (wback << 8);
1185        offset |= (add   << 9);
1186        offset |= (index << 10);
1187        offset |= (1 << 11);
1188
1189        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset);
1190    }
1191
    // LDRB (register): load byte from [rn + (rm << shift)].
    ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc); // LDR (literal)
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1203
    // LDRSB (register): load sign-extended byte from [rn + (rm << shift)].
    void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1215
    // LDRSH (register): load sign-extended halfword from [rn + (rm << shift)].
    void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1227
    // LSL (immediate): rd = rm << shiftAmount.
    void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1235
    // LSL (register): rd = rn << rm (shift amount from register).
    ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1243
    // LSR (immediate): rd = rm >> shiftAmount (logical).
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1251
    // LSR (register): rd = rn >> rm (logical, shift amount from register).
    ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1259
    // MOVW (T3): rd = 16-bit immediate (zero-extended). Used for immediates
    // that cannot be expressed as a modified immediate.
    ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!imm.isEncodedImm());
        ASSERT(!BadReg(rd));

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
    }
1268
#if OS(LINUX) || OS(QNX)
    // Rewrites a previously patched jump sequence back to
    // movw/movt right, #imm; cmp left, right (5 half-words), then flushes
    // the instruction cache over the rewritten range.
    static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm)
    {
        uint16_t* address = static_cast<uint16_t*>(instructionStart);
        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
        address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
        address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
        address[4] = OP_CMP_reg_T2 | left;
        cacheFlush(address, sizeof(uint16_t) * 5);
    }
#else
    // Rewrites a previously patched jump back to a single movT3 (movw)
    // instruction (2 half-words), then flushes the instruction cache.
    static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isValid());
        ASSERT(!imm.isEncodedImm());
        ASSERT(!BadReg(rd));

        uint16_t* address = static_cast<uint16_t*>(instructionStart);
        address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
        address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
        cacheFlush(address, sizeof(uint16_t) * 2);
    }
#endif
1295
1296    ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm)
1297    {
1298        ASSERT(imm.isValid());
1299        ASSERT(!BadReg(rd));
1300
1301        if ((rd < 8) && imm.isUInt8())
1302            m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
1303        else if (imm.isEncodedImm())
1304            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
1305        else
1306            movT3(rd, imm);
1307    }
1308
    // MOV (register): rd = rm; 16-bit encoding valid for high registers too.
    ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm)
    {
        m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
    }
1313
    // MOVT: write imm16 into the top halfword of rd, leaving the bottom
    // halfword unchanged.
    ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isUInt16());
        ASSERT(!BadReg(rd));
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
    }
1320
    // MVN (immediate): rd = ~imm.
    ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm)
    {
        ASSERT(imm.isEncodedImm());
        ASSERT(!BadReg(rd));

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
    }
1328
    // MVN (register) with an optional shift applied to rm; wide T2 encoding.
    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1335
1336    ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm)
1337    {
1338        if (!((rd | rm) & 8))
1339            m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
1340        else
1341            mvn(rd, rm, ShiftTypeAndAmount());
1342    }
1343
    // NEG: rd = -rm, implemented as a reverse-subtract from zero.
    ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm)
    {
        ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
        sub(rd, zero, rm);
    }
1349
    // ORR (immediate): rd = rn | imm, wide T1 encoding.
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());
        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
    }
1357
    // ORR (register) with an optional shift applied to rm; wide T2 encoding.
    ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1365
1366    void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1367    {
1368        if ((rd == rn) && !((rd | rm) & 8))
1369            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1370        else if ((rd == rm) && !((rd | rn) & 8))
1371            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1372        else
1373            orr(rd, rn, rm, ShiftTypeAndAmount());
1374    }
1375
    // ORRS (register) with an optional shift applied to rm; wide flag-setting
    // T2 encoding.
    ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ORR_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1383
    // ORRS (register), no shift. The narrow form reuses OP_ORR_reg_T1;
    // NOTE(review): the 16-bit encoding sets flags only when executed outside
    // an IT block — presumably callers only use this outside IT; confirm.
    void orr_S(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if ((rd == rn) && !((rd | rm) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
        else if ((rd == rm) && !((rd | rn) & 8))
            m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
        else
            orr_S(rd, rn, rm, ShiftTypeAndAmount());
    }
1393
    // ROR (immediate): rd = rm rotated right by shiftAmount.
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rm));
        ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
        m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1401
    // ROR (register): rd = rn rotated right by rm.
    ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
    }
1409
#if CPU(APPLE_ARMV7S)
    // SDIV: rd = rn / rm (signed). Only available on cores with the
    // hardware-divide extension.
    ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
    }
#endif
1419
    // SMULL: signed 32x32 -> 64-bit multiply; low word to rdLo, high word
    // to rdHi. rdLo and rdHi must be distinct.
    ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rdLo));
        ASSERT(!BadReg(rdHi));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        ASSERT(rdLo != rdHi);
        m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
    }
1429
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STR (immediate): store word to [rn + imm]. Chooses the shortest
    // encoding: T1 (low regs, word-aligned 7-bit offset), T2 (SP-relative,
    // word-aligned 10-bit offset), else wide T3 (12-bit offset).
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10())
            m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast<uint8_t>(imm.getUInt10() >> 2));
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
    }
1444
1445    // If index is set, this is a regular offset or a pre-indexed store;
1446    // if index is not set then is is a post-index store.
1447    //
1448    // If wback is set rn is updated - this is a pre or post index store,
1449    // if wback is not set this is a regular offset memory access.
1450    //
1451    // (-255 <= offset <= 255)
1452    // _reg = REG[rn]
1453    // _tmp = _reg + offset
1454    // MEM[index ? _tmp : _reg] = REG[rt]
1455    // if (wback) REG[rn] = _tmp
1456    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1457    {
1458        ASSERT(rt != ARMRegisters::pc);
1459        ASSERT(rn != ARMRegisters::pc);
1460        ASSERT(index || wback);
1461        ASSERT(!wback | (rt != rn));
1462
1463        bool add = true;
1464        if (offset < 0) {
1465            add = false;
1466            offset = -offset;
1467        }
1468        ASSERT((offset & ~0xff) == 0);
1469
1470        offset |= (wback << 8);
1471        offset |= (add   << 9);
1472        offset |= (index << 10);
1473        offset |= (1 << 11);
1474
1475        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1476    }
1477
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STR (register): store word to [rn + (rm << shift)].
    ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1490
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STRB (immediate): store byte to [rn + imm].
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12());
    }
1503
1504    // If index is set, this is a regular offset or a pre-indexed store;
1505    // if index is not set then is is a post-index store.
1506    //
1507    // If wback is set rn is updated - this is a pre or post index store,
1508    // if wback is not set this is a regular offset memory access.
1509    //
1510    // (-255 <= offset <= 255)
1511    // _reg = REG[rn]
1512    // _tmp = _reg + offset
1513    // MEM[index ? _tmp : _reg] = REG[rt]
1514    // if (wback) REG[rn] = _tmp
1515    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1516    {
1517        ASSERT(rt != ARMRegisters::pc);
1518        ASSERT(rn != ARMRegisters::pc);
1519        ASSERT(index || wback);
1520        ASSERT(!wback | (rt != rn));
1521
1522        bool add = true;
1523        if (offset < 0) {
1524            add = false;
1525            offset = -offset;
1526        }
1527        ASSERT((offset & ~0xff) == 0);
1528
1529        offset |= (wback << 8);
1530        offset |= (add   << 9);
1531        offset |= (index << 10);
1532        offset |= (1 << 11);
1533
1534        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset);
1535    }
1536
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STRB (register): store byte to [rn + (rm << shift)].
    ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1549
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STRH (immediate): store halfword to [rn + imm].
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(rt != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isUInt12());

        if (!((rt | rn) & 8) && imm.isUInt7())
            m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
        else
            m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
    }
1562
1563    // If index is set, this is a regular offset or a pre-indexed store;
1564    // if index is not set then is is a post-index store.
1565    //
1566    // If wback is set rn is updated - this is a pre or post index store,
1567    // if wback is not set this is a regular offset memory access.
1568    //
1569    // (-255 <= offset <= 255)
1570    // _reg = REG[rn]
1571    // _tmp = _reg + offset
1572    // MEM[index ? _tmp : _reg] = REG[rt]
1573    // if (wback) REG[rn] = _tmp
1574    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1575    {
1576        ASSERT(rt != ARMRegisters::pc);
1577        ASSERT(rn != ARMRegisters::pc);
1578        ASSERT(index || wback);
1579        ASSERT(!wback | (rt != rn));
1580
1581        bool add = true;
1582        if (offset < 0) {
1583            add = false;
1584            offset = -offset;
1585        }
1586        ASSERT(!(offset & ~0xff));
1587
1588        offset |= (wback << 8);
1589        offset |= (add   << 9);
1590        offset |= (index << 10);
1591        offset |= (1 << 11);
1592
1593        m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset);
1594    }
1595
    // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
    // STRH (register): store halfword to [rn + (rm << shift)].
    ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0)
    {
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        ASSERT(shift <= 3);

        if (!shift && !((rt | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt);
        else
            m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
    }
1608
    // SUB (immediate): rd = rn - imm. Selects, in order of preference:
    // SUB SP T1 (SP-relative, word-aligned 9-bit), narrow T1 (3-bit imm),
    // narrow T2 (rd == rn, 8-bit imm), wide T3 (modified immediate), and
    // finally wide T4 (plain 12-bit immediate, i.e. SUBW).
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            ASSERT(!(imm.getUInt16() & 3));
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
            return;
        } else if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        if (imm.isEncodedImm())
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
        else {
            ASSERT(imm.isUInt12());
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
        }
    }
1638
    // Reverse subtract: rd = imm - rn (RSB). With a zero immediate and low
    // registers, the narrow NEG-style T1 encoding is used.
    ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        if (!((rd | rn) & 8) && !imm.getUInt12())
            m_formatter.oneWordOp10Reg3Reg3(OP_RSB_imm_T1, rn, rd);
        else
            m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm);
    }
1651
    // SUB (register) with an optional shift applied to rm; wide T2 encoding.
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1660
    // NOTE: In an IT block, sub doesn't modify the flags register.
    // SUB (register), no shift: narrow T1 when all registers are low.
    ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        if (!((rd | rn | rm) & 8))
            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
        else
            sub(rd, rn, rm, ShiftTypeAndAmount());
    }
1669
    // Not allowed in an IT (if then) block.
    // SUBS (immediate): rd = rn - imm, setting flags. Encoding selection
    // mirrors sub(), with the wide flag-setting T3 form as the fallback.
    // NOTE(review): the SUB SP T1 path never sets flags — presumably callers
    // never rely on flags for SP adjustments; confirm.
    void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
    {
        // Rd can only be SP if Rn is also SP.
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());

        if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
            ASSERT(!(imm.getUInt16() & 3));
            m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast<uint8_t>(imm.getUInt9() >> 2));
            return;
        } else if (!((rd | rn) & 8)) {
            if (imm.isUInt3()) {
                m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
                return;
            } else if ((rd == rn) && imm.isUInt8()) {
                m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
                return;
            }
        }

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
    }
1695
    // Reverse subtract, setting flags: rd = imm - rn (RSBS), wide T2 encoding.
    ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn)
    {
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt12());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm);
    }
1705
    // Not allowed in an IT (if then) block?
    // SUBS (register) with an optional shift applied to rm; wide flag-setting
    // T2 encoding.
    ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
        ASSERT(rd != ARMRegisters::pc);
        ASSERT(rn != ARMRegisters::pc);
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
    }
1715
1716    // Not allowed in an IT (if then) block.
1717    ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1718    {
1719        if (!((rd | rn | rm) & 8))
1720            m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1721        else
1722            sub_S(rd, rn, rm, ShiftTypeAndAmount());
1723    }
1724
    // Test (immediate): sets flags on rn AND imm; destination field is 0xf (none).
    ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm)
    {
        ASSERT(!BadReg(rn));
        ASSERT(imm.isEncodedImm());

        m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
    }
1732
    // Test (shifted register): sets flags on rn AND (rm shifted).
    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
    {
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
    }
1739
1740    ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm)
1741    {
1742        if ((rn | rm) & 8)
1743            tst(rn, rm, ShiftTypeAndAmount());
1744        else
1745            m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
1746    }
1747
    // Unsigned bit-field extract: rd = rn[lsb .. lsb+width-1], zero-extended.
    // The lsb is split across two instruction fields (imm3:imm2); width is encoded as width-1.
    ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width)
    {
        ASSERT(lsb < 32);
        ASSERT((width >= 1) && (width <= 32));
        ASSERT((lsb + width) <= 32);
        m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
    }
1755
#if CPU(APPLE_ARMV7S)
    // Unsigned divide: rd = rn / rm. Only available on cores with integer divide (ARMv7S here).
    ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
    {
        ASSERT(!BadReg(rd));
        ASSERT(!BadReg(rn));
        ASSERT(!BadReg(rm));
        m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm));
    }
#endif
1765
    // VADD.F64: rd = rn + rm (double precision).
    void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm);
    }
1770
    // VCMP.F64: compare rd with rm, setting the FPSCR flags.
    void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm);
    }
1775
    // VCMP.F64 against zero: compare rd with +0.0, setting the FPSCR flags.
    void vcmpz(FPDoubleRegisterID rd)
    {
        m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0));
    }
1780
    // VCVT: signed 32-bit integer (in rm) to double-precision float (in rd).
    void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        // vcvtOp flags are (toInteger, isUnsigned, isRoundZero).
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm);
    }
1786
    // VCVT: double-precision float (in rm) to signed 32-bit integer (in rd), rounding toward zero.
    void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // vcvtOp flags are (toInteger, isUnsigned, isRoundZero).
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm);
    }
1792
    // VCVT: double-precision float (in rm) to unsigned 32-bit integer (in rd), rounding toward zero.
    void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        // vcvtOp flags are (toInteger, isUnsigned, isRoundZero).
        m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm);
    }
1798
    // VDIV.F64: rd = rn / rm (double precision).
    void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm);
    }
1803
    // VLDR: load a double from [rn + imm] into rd.
    void vldr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm);
    }
1808
    // FLDS (VLDR.32): load a single-precision float from [rn + imm] into rd.
    void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm);
    }
1813
    // VMOV: move a single-precision VFP register (rn) to a core register (rd).
    void vmov(RegisterID rd, FPSingleRegisterID rn)
    {
        ASSERT(!BadReg(rd));
        m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0));
    }
1819
    // VMOV: move a core register (rn) to a single-precision VFP register (rd).
    void vmov(FPSingleRegisterID rd, RegisterID rn)
    {
        ASSERT(!BadReg(rn));
        m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0));
    }
1825
    // VMOV: move a double VFP register (rn) to a core register pair (rd1 = low word, rd2 = high word).
    void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn)
    {
        ASSERT(!BadReg(rd1));
        ASSERT(!BadReg(rd2));
        // The | 16 marks the operand as occupying the "high" 4-bit field of the encoding.
        m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn);
    }
1832
    // VMOV: move a core register pair (rn1 = low word, rn2 = high word) into a double VFP register (rd).
    void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2)
    {
        ASSERT(!BadReg(rn1));
        ASSERT(!BadReg(rn2));
        // The | 16 marks the operand as occupying the "high" 4-bit field of the encoding.
        m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd);
    }
1839
    // VMOV.F64: copy double register rn into rd.
    void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn)
    {
        m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn);
    }
1844
    // VMRS: copy FPSCR into 'reg'. The default, pc, selects the APSR_nzcv form,
    // transferring the VFP comparison flags into the ARM condition flags.
    void vmrs(RegisterID reg = ARMRegisters::pc)
    {
        ASSERT(reg != ARMRegisters::sp);
        m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0));
    }
1850
    // VMUL.F64: rd = rn * rm (double precision).
    void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm);
    }
1855
    // VSTR: store double rd to [rn + imm].
    void vstr(FPDoubleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm);
    }
1860
    // FSTS (VSTR.32): store single-precision rd to [rn + imm].
    void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm)
    {
        m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm);
    }
1865
    // VSUB.F64: rd = rn - rm (double precision).
    void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm);
    }
1870
    // VABS.F64: rd = |rm| (double precision).
    void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm);
    }
1875
    // VNEG.F64: rd = -rm (double precision).
    void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm);
    }
1880
    // VSQRT.F64: rd = sqrt(rm) (double precision).
    void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm);
    }
1885
    // VCVT.F64.F32: widen single-precision rm into double-precision rd.
    void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm);
    }
1890
    // VCVT.F32.F64: narrow double-precision rm into single-precision rd.
    void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm)
    {
        m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm);
    }
1895
    // 16-bit NOP (T1 encoding).
    void nop()
    {
        m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0);
    }
1900
    // 32-bit NOP (T2 encoding) — used when exactly 4 bytes of padding are needed.
    void nopw()
    {
        m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
    }
1905
    // Current position, without padding past any pending watchpoint region
    // (contrast with label() below).
    AssemblerLabel labelIgnoringWatchpoints()
    {
        return m_formatter.label();
    }
1910
    // Label marking the start of a watchpoint site. Records the site so that
    // label() will pad subsequent code past the maximum jump-replacement size,
    // guaranteeing room to later overwrite the site with a jump.
    AssemblerLabel labelForWatchpoint()
    {
        AssemblerLabel result = m_formatter.label();
        // If another watchpoint was taken at this exact offset, pad forward first.
        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
            result = label();
        m_indexOfLastWatchpoint = result.m_offset;
        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
        return result;
    }
1920
    // Current position, padded with nops if necessary so it does not fall inside
    // the tail of the last watchpoint (which may later be overwritten by a jump).
    AssemblerLabel label()
    {
        AssemblerLabel result = m_formatter.label();
        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
            // Prefer the 4-byte nop when at least 4 bytes of padding remain.
            if (UNLIKELY(static_cast<int>(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint))
                nopw();
            else
                nop();
            result = m_formatter.label();
        }
        return result;
    }
1933
    // Pad with breakpoint instructions until the buffer is aligned, then return a label.
    AssemblerLabel align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            bkpt();

        return label();
    }
1941
1942    static void* getRelocatedAddress(void* code, AssemblerLabel label)
1943    {
1944        ASSERT(label.isSet());
1945        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
1946    }
1947
    // Signed byte distance from label a to label b.
    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }
1952
    // Offset adjustment for 'location' after branch compaction. The per-word
    // adjustments were recorded into the buffer itself by recordLinkOffsets().
    int executableOffsetFor(int location)
    {
        if (!location)
            return 0;
        return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
    }
1959
    // Bytes saved by shrinking a jump of type 'jumpType' down to encoding 'jumpLinkType'.
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
1961
1962    // Assembler admin methods:
1963
    // Orders link records by their source (branch) offset, for sorting in jumpsToLink().
    static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
    {
        return a.from() < b.from();
    }
1968
    // True if branch compaction may shrink a jump of this type.
    bool canCompact(JumpType jumpType)
    {
        // The following cannot be compacted:
        //   JumpFixed: represents custom jump sequence
        //   JumpNoConditionFixedSize: represents unconditional jump that must remain a fixed size
        //   JumpConditionFixedSize: represents conditional jump that must remain a fixed size
        return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
    }
1977
    // Chooses the smallest link (branch) encoding that can span from 'from' to 'to'
    // for a jump of the given type, assuming the jump currently occupies the full
    // padding reserved for its type.
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
    {
        if (jumpType == JumpFixed)
            return LinkInvalid;

        // for patchable jump we must leave space for the longest code sequence
        if (jumpType == JumpNoConditionFixedSize)
            return LinkBX;
        if (jumpType == JumpConditionFixedSize)
            return LinkConditionalBX;

        const int paddingSize = JUMP_ENUM_SIZE(jumpType);

        if (jumpType == JumpCondition) {
            // 2-byte conditional T1
            const uint16_t* jumpT1Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1)));
            if (canBeJumpT1(jumpT1Location, to))
                return LinkJumpT1;
            // 4-byte conditional T3
            const uint16_t* jumpT3Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3)));
            if (canBeJumpT3(jumpT3Location, to))
                return LinkJumpT3;
            // 4-byte conditional T4 with IT
            const uint16_t* conditionalJumpT4Location =
            reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4)));
            if (canBeJumpT4(conditionalJumpT4Location, to))
                return LinkConditionalJumpT4;
        } else {
            // 2-byte unconditional T2
            const uint16_t* jumpT2Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2)));
            if (canBeJumpT2(jumpT2Location, to))
                return LinkJumpT2;
            // 4-byte unconditional T4
            const uint16_t* jumpT4Location = reinterpret_cast_ptr<const uint16_t*>(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4)));
            if (canBeJumpT4(jumpT4Location, to))
                return LinkJumpT4;
            // use long jump sequence
            return LinkBX;
        }

        // No short conditional form reached the target: use the longest conditional sequence.
        ASSERT(jumpType == JumpCondition);
        return LinkConditionalBX;
    }
2021
    // Convenience overload: computes and also stores the link type on the record.
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
    {
        JumpLinkType linkType = computeJumpType(record.type(), from, to);
        record.setLinkType(linkType);
        return linkType;
    }
2028
    // Records the compaction offset for every 32-bit word in [regionStart, regionEnd),
    // writing directly into the (soon to be discarded) assembler buffer. Read back
    // by executableOffsetFor().
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
    {
        int32_t ptr = regionStart / sizeof(int32_t);
        const int32_t end = regionEnd / sizeof(int32_t);
        int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
        while (ptr < end)
            offsets[ptr++] = offset;
    }
2037
    // Returns the pending jump records, sorted by source offset (required by the
    // branch-compaction pass, which walks the buffer front to back).
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
    {
        std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
        return m_jumpsToLink;
    }
2043
    // Writes the branch encoding chosen by computeJumpType() into the code at 'from',
    // targeting 'to'.
    void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
    {
        switch (record.linkType()) {
        case LinkJumpT1:
            linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkJumpT2:
            linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkJumpT3:
            linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkJumpT4:
            linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkConditionalJumpT4:
            linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkConditionalBX:
            linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        case LinkBX:
            linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
2073
    // Raw buffer accessors: the unfinalized instruction stream and its size in bytes.
    void* unlinkedCode() { return m_formatter.data(); }
    size_t codeSize() const { return m_formatter.codeSize(); }
2076
    // The return address of a call is the label taken immediately after it.
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }
2082
2083    // Linking & patching:
2084    //
2085    // 'link' and 'patch' methods are for use on unprotected code - such as the code
2086    // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
2087    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
2089    // pool the 'repatch' and 'relink' methods should be used.
2090
    // Defers linking of an in-buffer jump: records it for the compaction/link pass.
    void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition)
    {
        ASSERT(to.isSet());
        ASSERT(from.isSet());
        m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition));
    }
2097
    // Links the jump ending at code+from to the absolute address 'to'.
    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
        linkJumpAbsolute(location, to);
    }
2105
    // Links the call ending at code+from by rewriting the MOVW/MOVT pair that loads
    // the target address. 'to' must have its Thumb bit (bit 0) set.
    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
        ASSERT(from.isSet());
        ASSERT(reinterpret_cast<intptr_t>(to) & 1);

        setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to, false);
    }
2114
    // Writes 'value' into the MOVW/MOVT pair ending at code+where (no cache flush).
    static void linkPointer(void* code, AssemblerLabel where, void* value)
    {
        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
    }
2119
    // Re-targets an already-linked jump in finalized code, flushing the icache over
    // the maximum 5-halfword jump sequence.
    static void relinkJump(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));

        linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);

        cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
    }
2129
    // Re-targets an already-linked call in finalized code (flushes the icache).
    // 'to' must have its Thumb bit (bit 0) set.
    static void relinkCall(void* from, void* to)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
        ASSERT(reinterpret_cast<intptr_t>(to) & 1);

        setPointer(reinterpret_cast<uint16_t*>(from) - 1, to, true);
    }
2137
    // Reads back the target of the call ending at 'from' (from its MOVW/MOVT pair).
    static void* readCallTarget(void* from)
    {
        return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
    }
2142
    // Rewrites the 32-bit constant in the MOVW/MOVT pair ending at 'where' (flushes the icache).
    static void repatchInt32(void* where, int32_t value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setInt32(where, value, true);
    }
2149
    // Rewrites the offset of a compact (T4 immediate) load at 'where' to the new
    // signed 8-bit 'offset', rebuilding the imm8/add/index/wback fields of the
    // second halfword.
    static void repatchCompact(void* where, int32_t offset)
    {
        ASSERT(offset >= -255 && offset <= 255);

        // Encode the magnitude and record the sign in the 'add' (U) bit.
        bool add = true;
        if (offset < 0) {
            add = false;
            offset = -offset;
        }

        offset |= (add << 9);  // U: add vs subtract the offset
        offset |= (1 << 10);   // P: index (pre-indexed addressing)
        offset |= (1 << 11);   // fixed bit of this encoding

        uint16_t* location = reinterpret_cast<uint16_t*>(where);
        location[1] &= ~((1 << 12) - 1); // clear the low 12 bits of the second halfword
        location[1] |= offset;
        cacheFlush(location, sizeof(uint16_t) * 2);
    }
2169
    // Rewrites the pointer constant in the MOVW/MOVT pair ending at 'where' (flushes the icache).
    static void repatchPointer(void* where, void* value)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));

        setPointer(where, value, true);
    }
2176
    // Reads back the pointer constant from the MOVW/MOVT pair ending at 'where'.
    static void* readPointer(void* where)
    {
        return reinterpret_cast<void*>(readInt32(where));
    }
2181
    // Overwrites the instruction(s) at 'instructionStart' with a jump to 'to'.
    // On Linux/QNX the replacement region is maxJumpReplacementSize() bytes, so a
    // long (BX) sequence is used when a T4 branch cannot reach; elsewhere a T4
    // branch is assumed to suffice (see maxJumpReplacementSize()).
    static void replaceWithJump(void* instructionStart, void* to)
    {
        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
        ASSERT(!(bitwise_cast<uintptr_t>(to) & 1));

#if OS(LINUX) || OS(QNX)
        if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
            uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
            linkJumpT4(ptr, to);
            cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
        } else {
            uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
            linkBX(ptr, to);
            cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
        }
#else
        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
        linkJumpT4(ptr, to);
        cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
#endif
    }
2203
    // Maximum byte size of the sequence replaceWithJump() may write: 10 bytes where a
    // long BX sequence may be needed (Linux/QNX), otherwise 4 bytes for a T4 branch.
    static ptrdiff_t maxJumpReplacementSize()
    {
#if OS(LINUX) || OS(QNX)
        return 10;
#else
        return 4;
#endif
    }
2212
    // Converts an ADD-immediate (T3) at 'instructionStart' into the matching
    // LDR-immediate (T3), moving the destination-register field between the two
    // encodings' positions. A no-op if the instruction is already an LDR.
    static void replaceWithLoad(void* instructionStart)
    {
        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
        switch (ptr[0] & 0xFFF0) {
        case OP_LDR_imm_T3:
            // Already a load; nothing to do.
            break;
        case OP_ADD_imm_T3:
            ASSERT(!(ptr[1] & 0xF000));
            ptr[0] &= 0x000F;
            ptr[0] |= OP_LDR_imm_T3;
            // Move Rd from bits 8-11 to bits 12-15, then clear the old field.
            ptr[1] |= (ptr[1] & 0x0F00) << 4;
            ptr[1] &= 0xF0FF;
            cacheFlush(ptr, sizeof(uint16_t) * 2);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
2232
    // Inverse of replaceWithLoad(): converts an LDR-immediate (T3) at
    // 'instructionStart' back into the matching ADD-immediate (T3). A no-op if the
    // instruction is already an ADD.
    static void replaceWithAddressComputation(void* instructionStart)
    {
        ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
        uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
        switch (ptr[0] & 0xFFF0) {
        case OP_LDR_imm_T3:
            ASSERT(!(ptr[1] & 0x0F00));
            ptr[0] &= 0x000F;
            ptr[0] |= OP_ADD_imm_T3;
            // Move Rd from bits 12-15 to bits 8-11, then clear the old field.
            ptr[1] |= (ptr[1] & 0xF000) >> 4;
            ptr[1] &= 0x0FFF;
            cacheFlush(ptr, sizeof(uint16_t) * 2);
            break;
        case OP_ADD_imm_T3:
            // Already an address computation; nothing to do.
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
2252
    // Current write offset into the assembler buffer (debugging aid).
    unsigned debugOffset() { return m_formatter.debugOffset(); }
2254
#if OS(LINUX)
    // Issues the ARM Linux cacheflush syscall (number 0xf0002 in r7) for the
    // address range [begin, end). r7 is saved/restored because it may be the
    // Thumb frame pointer.
    static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
    {
        asm volatile(
            "push    {r7}\n"
            "mov     r0, %0\n"
            "mov     r1, %1\n"
            "movw    r7, #0x2\n"
            "movt    r7, #0xf\n"
            "movs    r2, #0x0\n"
            "svc     0x0\n"
            "pop     {r7}\n"
            :
            : "r" (begin), "r" (end)
            : "r0", "r1", "r2");
    }
#endif
2272
    // Synchronizes the instruction cache with freshly written code in [code, code+size).
    // On Linux the range is flushed page by page to avoid issues with syscall ranges
    // spanning page boundaries.
    static void cacheFlush(void* code, size_t size)
    {
#if OS(IOS)
        sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
#elif OS(LINUX)
        size_t page = pageSize();
        uintptr_t current = reinterpret_cast<uintptr_t>(code);
        uintptr_t end = current + size;
        uintptr_t firstPageEnd = (current & ~(page - 1)) + page;

        if (end <= firstPageEnd) {
            linuxPageFlush(current, end);
            return;
        }

        linuxPageFlush(current, firstPageEnd);

        for (current = firstPageEnd; current + page < end; current += page)
            linuxPageFlush(current, current + page);

        linuxPageFlush(current, end);
#elif OS(WINCE)
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
#elif OS(QNX)
#if !ENABLE(ASSEMBLER_WX_EXCLUSIVE)
        msync(code, size, MS_INVALIDATE_ICACHE);
#else
        UNUSED_PARAM(code);
        UNUSED_PARAM(size);
#endif
#else
#error "The cacheFlush support is missing on this platform."
#endif
    }
2307
2308private:
2309    // VFP operations commonly take one or more 5-bit operands, typically representing a
2310    // floating point register number.  This will commonly be encoded in the instruction
2311    // in two parts, with one single bit field, and one 4-bit field.  In the case of
2312    // double precision operands the high bit of the register number will be encoded
2313    // separately, and for single precision operands the high bit of the register number
2314    // will be encoded individually.
2315    // VFPOperand encapsulates a 5-bit VFP operand, with bits 0..3 containing the 4-bit
2316    // field to be encoded together in the instruction (the low 4-bits of a double
2317    // register number, or the high 4-bits of a single register number), and bit 4
2318    // contains the bit value to be encoded individually.
    struct VFPOperand {
        // Raw 5-bit operand value; must fit in bits 0..4.
        explicit VFPOperand(uint32_t value)
            : m_value(value)
        {
            ASSERT(!(m_value & ~0x1f));
        }

        // Double registers: the register number is used as-is (low 4 bits + high bit).
        VFPOperand(FPDoubleRegisterID reg)
            : m_value(reg)
        {
        }

        // Core registers: the register number is used as-is.
        VFPOperand(RegisterID reg)
            : m_value(reg)
        {
        }

        // Single registers: the 4-bit field holds the high bits of the register
        // number, and the separately-encoded bit is the lowest bit.
        VFPOperand(FPSingleRegisterID reg)
            : m_value(((reg & 1) << 4) | (reg >> 1)) // rotate the lowest bit of 'reg' to the top.
        {
        }

        // The single bit encoded on its own in the instruction.
        uint32_t bits1()
        {
            return m_value >> 4;
        }

        // The 4-bit field encoded together in the instruction.
        uint32_t bits4()
        {
            return m_value & 0xf;
        }

        uint32_t m_value;
    };
2353
    // Builds the opc2/op operand of a VCVT instruction from its three mode flags:
    // direction (toInteger), signedness, and rounding mode.
    VFPOperand vcvtOp(bool toInteger, bool isUnsigned, bool isRoundZero)
    {
        // Cannot specify rounding when converting to float.
        ASSERT(toInteger || !isRoundZero);

        uint32_t op = 0x8;
        if (toInteger) {
            // opc2 indicates both toInteger & isUnsigned.
            op |= isUnsigned ? 0x4 : 0x5;
            // 'op' field in instruction is isRoundZero
            if (isRoundZero)
                op |= 0x10;
        } else {
            ASSERT(!isRoundZero);
            // 'op' field in instruction is isUnsigned
            if (!isUnsigned)
                op |= 0x10;
        }
        return VFPOperand(op);
    }
2374
    // Rewrites the 32-bit constant materialized by the MOVW/MOVT pair that ENDS at
    // 'code' (the pair occupies the four preceding halfwords). The destination
    // register fields already present in the instructions are preserved.
    static void setInt32(void* code, uint32_t value, bool flush)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
        location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
        location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
        location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
        location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);

        if (flush)
            cacheFlush(location - 4, 4 * sizeof(uint16_t));
    }
2390
    // Reads back the 32-bit constant materialized by the MOVW/MOVT pair that ENDS
    // at 'code' (inverse of setInt32()).
    static int32_t readInt32(void* code)
    {
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));

        ARMThumbImmediate lo16;
        ARMThumbImmediate hi16;
        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]);
        decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]);
        uint32_t result = hi16.asUInt16();
        result <<= 16;
        result |= lo16.asUInt16();
        return static_cast<int32_t>(result);
    }
2407
    // Rewrites the word-scaled imm5 offset field (bits 6-10) of an LDR_imm_T1
    // instruction at 'code' (flushes the icache).
    static void setUInt7ForLoad(void* code, ARMThumbImmediate imm)
    {
        // Requires us to have planted a LDR_imm_T1
        ASSERT(imm.isValid());
        ASSERT(imm.isUInt7());
        uint16_t* location = reinterpret_cast<uint16_t*>(code);
        location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
        location[0] |= (imm.getUInt7() >> 2) << 6;
        cacheFlush(location, sizeof(uint16_t));
    }
2418
    // Pointer-typed convenience wrapper over setInt32().
    static void setPointer(void* code, void* value, bool flush)
    {
        setInt32(code, reinterpret_cast<uint32_t>(value), flush);
    }
2423
    // True if the two halfwords at 'address' form an unconditional B (T4 encoding).
    static bool isB(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
    }
2429
    // True if the halfword at 'address' is a BX instruction (any register).
    static bool isBX(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] & 0xff87) == OP_BX;
    }
2435
    // True if the two halfwords at 'address' form a MOVW (MOV immediate, T3 encoding).
    static bool isMOV_imm_T3(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
    }
2441
    // True if the two halfwords at 'address' form a MOVT instruction.
    static bool isMOVT(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
    }
2447
    // True if the halfword at 'address' is a 16-bit NOP (T1).
    static bool isNOP_T1(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return instruction[0] == OP_NOP_T1;
    }
2453
    // True if the two halfwords at 'address' form a 32-bit NOP (T2).
    static bool isNOP_T2(void* address)
    {
        uint16_t* instruction = static_cast<uint16_t*>(address);
        return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
    }
2459
    // True if the displacement from 'instruction' to 'target' fits the B T1
    // (2-byte conditional) encoding — i.e. it sign-extends from the available bits.
    static bool canBeJumpT1(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        return ((relative << 23) >> 23) == relative;
    }
2472
    // True if the displacement from 'instruction' to 'target' fits the B T2
    // (2-byte unconditional) encoding.
    static bool canBeJumpT2(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;
        return ((relative << 20) >> 20) == relative;
    }
2485
    // True if the displacement from 'instruction' to 'target' fits the B T3
    // (4-byte conditional) encoding.
    static bool canBeJumpT3(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        return ((relative << 11) >> 11) == relative;
    }
2494
    // True if the displacement from 'instruction' to 'target' fits the B T4
    // (4-byte unconditional) encoding.
    static bool canBeJumpT4(const uint16_t* instruction, const void* target)
    {
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        return ((relative << 7) >> 7) == relative;
    }
2503
    // Writes a 2-byte conditional branch (B<cond>, T1 encoding) ENDING at
    // 'instruction' (i.e. into instruction[-1]), targeting 'target'.
    void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT1(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T1 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
    }
2521
    // Writes a 2-byte unconditional branch (B, T2 encoding) ENDING at
    // 'instruction' (i.e. into instruction[-1]), targeting 'target'.
    static void linkJumpT2(uint16_t* instruction, void* target)
    {
        // FIXME: this should be up in the MacroAssembler layer. :-(
        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
        ASSERT(canBeJumpT2(instruction, target));

        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
        // It does not appear to be documented in the ARM ARM (big surprise), but
        // for OP_B_T2 the branch displacement encoded in the instruction is 2
        // less than the actual displacement.
        relative -= 2;

        // All branch offsets should be an even distance.
        ASSERT(!(relative & 1));
        instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
    }
2539
2540    void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
2541    {
2542        // FIMXE: this should be up in the MacroAssembler layer. :-(
2543        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2544        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2545        ASSERT(canBeJumpT3(instruction, target));
2546
2547        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2548
2549        // All branch offsets should be an even distance.
2550        ASSERT(!(relative & 1));
2551        instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
2552        instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
2553    }
2554
2555    static void linkJumpT4(uint16_t* instruction, void* target)
2556    {
2557        // FIMXE: this should be up in the MacroAssembler layer. :-(
2558        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2559        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2560        ASSERT(canBeJumpT4(instruction, target));
2561
2562        intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
2563        // ARM encoding for the top two bits below the sign bit is 'peculiar'.
2564        if (relative >= 0)
2565            relative ^= 0xC00000;
2566
2567        // All branch offsets should be an even distance.
2568        ASSERT(!(relative & 1));
2569        instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
2570        instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
2571    }
2572
2573    void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
2574    {
2575        // FIMXE: this should be up in the MacroAssembler layer. :-(
2576        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2577        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2578
2579        instruction[-3] = ifThenElse(cond) | OP_IT;
2580        linkJumpT4(instruction, target);
2581    }
2582
2583    static void linkBX(uint16_t* instruction, void* target)
2584    {
2585        // FIMXE: this should be up in the MacroAssembler layer. :-(
2586        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2587        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2588
2589        const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2590        ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2591        ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2592        instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2593        instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2594        instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2595        instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2596        instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2597    }
2598
2599    void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
2600    {
2601        // FIMXE: this should be up in the MacroAssembler layer. :-(
2602        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2603        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2604
2605        linkBX(instruction, target);
2606        instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
2607    }
2608
2609    static void linkJumpAbsolute(uint16_t* instruction, void* target)
2610    {
2611        // FIMXE: this should be up in the MacroAssembler layer. :-(
2612        ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
2613        ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
2614
2615        ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
2616               || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
2617
2618        if (canBeJumpT4(instruction, target)) {
2619            // There may be a better way to fix this, but right now put the NOPs first, since in the
2620            // case of an conditional branch this will be coming after an ITTT predicating *three*
2621            // instructions!  Looking backwards to modify the ITTT to an IT is not easy, due to
2622            // variable wdith encoding - the previous instruction might *look* like an ITTT but
2623            // actually be the second half of a 2-word op.
2624            instruction[-5] = OP_NOP_T1;
2625            instruction[-4] = OP_NOP_T2a;
2626            instruction[-3] = OP_NOP_T2b;
2627            linkJumpT4(instruction, target);
2628        } else {
2629            const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
2630            ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
2631            ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
2632            instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
2633            instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
2634            instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
2635            instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
2636            instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
2637        }
2638    }
2639
2640    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmFirst(uint16_t op, ARMThumbImmediate imm)
2641    {
2642        return op | (imm.m_value.i << 10) | imm.m_value.imm4;
2643    }
2644
2645    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value)
2646    {
2647        result.m_value.i = (value >> 10) & 1;
2648        result.m_value.imm4 = value & 15;
2649    }
2650
2651    static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
2652    {
2653        return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
2654    }
2655
2656    static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value)
2657    {
2658        result.m_value.imm3 = (value >> 12) & 7;
2659        result.m_value.imm8 = value & 255;
2660    }
2661
    // Low-level emitter: packs operands into 16-bit Thumb halfwords and
    // appends them to the assembler buffer. Method names describe the field
    // layout of the encoding they emit (e.g. oneWordOp5Reg3Imm8 = a 5-bit
    // opcode, a 3-bit register, an 8-bit immediate in one halfword).
    // Callers are responsible for ensuring each operand fits its field.
    class ARMInstructionFormatter {
    public:
        // One halfword: 5-bit opcode | rd in bits 10..8 | imm in the low byte.
        ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
        {
            m_buffer.putShort(op | (rd << 8) | imm);
        }

        // One halfword: 5-bit opcode | 5-bit immediate | two 3-bit registers.
        ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
        }

        // One halfword: 7-bit opcode | three 3-bit registers.
        ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
        {
            m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
        }

        // One halfword: 8-bit opcode | 8-bit immediate.
        ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        // One halfword: 8-bit opcode | a 4-bit register (reg2) split 1+3, with
        // its high bit at bit 7 and low three bits in bits 2..0; reg1 (3-bit)
        // sits between them.
        ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
        }

        // One halfword: 9-bit opcode | 7-bit immediate (caller keeps imm < 128).
        ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
        {
            m_buffer.putShort(op | imm);
        }

        // One halfword: 10-bit opcode | two 3-bit registers.
        ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
        {
            m_buffer.putShort(op | (reg1 << 3) | reg2);
        }

        // Two halfwords: 12-bit opcode with a 4-bit register in the first,
        // four 4-bit fields (packed in ff) in the second.
        ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
        {
            m_buffer.putShort(op | reg);
            m_buffer.putShort(ff.m_u.value);
        }

        // Two halfwords: full 16-bit opcode, then four 4-bit fields.
        ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
        {
            m_buffer.putShort(op);
            m_buffer.putShort(ff.m_u.value);
        }

        // Two halfwords: two full 16-bit opcodes, no operand fields.
        ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
        {
            m_buffer.putShort(op1);
            m_buffer.putShort(op2);
        }

        // Two halfwords: MOVW/MOVT-style modified-immediate encoding. The
        // caller-supplied imm4 overrides the immediate's imm4 field before
        // the halves are packed by the static helpers.
        ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
        {
            ARMThumbImmediate newImm = imm;
            newImm.m_value.imm4 = imm4;

            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmFirst(op, newImm));
            m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
        }

        // Two halfwords: 12-bit opcode | reg1 in the first; reg2 in the top
        // nibble and a 12-bit immediate in the second.
        ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
        {
            m_buffer.putShort(op | reg1);
            m_buffer.putShort((reg2 << 12) | imm);
        }

        // Two halfwords: 12-bit opcode | reg1 in the first; imm1 (3 bits),
        // reg2 (4 bits), imm2 (2 bits), imm3 (low bits) packed into the second.
        ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
        {
            m_buffer.putShort(op | reg1);
            m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
        }

        // Formats up instructions of the pattern:
        //    111111111B11aaaa:bbbb222SA2C2cccc
        // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
        // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
        ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
        {
            // The asserts check that the operand fields of op1/op2 are clear.
            ASSERT(!(op1 & 0x004f));
            ASSERT(!(op2 & 0xf1af));
            m_buffer.putShort(op1 | b.bits1() << 6 | a.bits4());
            m_buffer.putShort(op2 | b.bits4() << 12 | size << 8 | a.bits1() << 7 | c.bits1() << 5 | c.bits4());
        }

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        // (i.e. +/-(0..255) 32-bit words)
        ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
        {
            // Encode the sign as the 'up' (add/subtract) bit with a positive
            // magnitude, per the VFP load/store addressing form.
            bool up = true;
            if (imm < 0) {
                imm = -imm;
                up = false;
            }

            // The offset must be word-aligned and fit in 8 bits after >> 2.
            uint32_t offset = imm;
            ASSERT(!(offset & ~0x3fc));
            offset >>= 2;

            m_buffer.putShort(op1 | (up << 7) | rd.bits1() << 6 | rn);
            m_buffer.putShort(op2 | rd.bits4() << 12 | size << 8 | offset);
        }

        // Administrative methods: thin pass-throughs to the underlying buffer.

        size_t codeSize() const { return m_buffer.codeSize(); }
        AssemblerLabel label() const { return m_buffer.label(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }

        unsigned debugOffset() { return m_buffer.debugOffset(); }

    private:
        AssemblerBuffer m_buffer;
    } m_formatter;
2780
2781    Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
2782    int m_indexOfLastWatchpoint;
2783    int m_indexOfTailOfLastWatchpoint;
2784};
2785
2786} // namespace JSC
2787
2788#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
2789
2790#endif // ARMAssembler_h
2791