/*
 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h

#include "AssemblerBuffer.h"
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>

#if ENABLE(ASSEMBLER)


#if PLATFORM(QT)
#define ENABLE_JIT_CONSTANT_BLINDING 0
#endif

#ifndef ENABLE_JIT_CONSTANT_BLINDING
#define ENABLE_JIT_CONSTANT_BLINDING 1
#endif

namespace JSC {

inline bool isARMv7s()
{
#if CPU(APPLE_ARMV7S)
    return true;
#else
    return false;
#endif
}

inline bool isX86()
{
#if CPU(X86_64) || CPU(X86)
    return true;
#else
    return false;
#endif
}

class JumpReplacementWatchpoint;
class LinkBuffer;
class RepatchBuffer;
class Watchpoint;
namespace DFG {
struct OSRExit;
}

template <class AssemblerType>
class AbstractMacroAssembler {
public:
    friend class JITWriteBarrierBase;
    typedef AssemblerType AssemblerType_T;

    typedef MacroAssemblerCodePtr CodePtr;
    typedef MacroAssemblerCodeRef CodeRef;

    class Jump;

    typedef typename AssemblerType::RegisterID RegisterID;

    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
    };

    // Address:
    //
    // Describes a simple base-offset address.
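    //
    // For illustration (register names and the load32() call here are hypothetical;
    // concrete operations live in the per-architecture MacroAssembler subclasses):
    //
    //     Address slot(baseReg, 8);  // memory at baseReg + 8
    //     load32(slot, destReg);     // read a 32-bit value from that location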
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        intptr_t offset;
    };

    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. so that the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent, making the explicit wrapping of the Address in the former
    // unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    // BaseIndex:
    //
    // Describes a base + (index * scale) + offset addressing mode, where the scale
    // is one of the Scale values above (a power-of-two multiplier for the index).
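    //
    // For illustration (register names and the load32() call are hypothetical):
    //
    //     // Read the 32-bit element at 'indexReg' of an array of 4-byte elements:
    //     load32(BaseIndex(arrayReg, indexReg, TimesFour, 0), destReg);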
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;
    };

    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer.  For regular load & store
    // operations an unwrapped void* will be used, rather than using this.
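    //
    // For illustration ('counter' and the add32() call are hypothetical):
    //
    //     static int counter;
    //     add32(TrustedImm32(1), AbsoluteAddress(&counter)); // bump a global directly in memory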
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
            : m_ptr(ptr)
        {
        }

        const void* m_ptr;
    };

    // TrustedImmPtr:
    //
    // A pointer-sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // it from pointers used as absolute addresses to memory operations.
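    //
    // For illustration ('knownPointer' and the move() call are hypothetical):
    //
    //     move(TrustedImmPtr(knownPointer), destReg); // materialize a compiler-controlled pointer constant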
    struct TrustedImmPtr {
        TrustedImmPtr() { }

        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        // This is only here so that TrustedImmPtr(0) does not confuse the C++
        // overload handling rules.
        explicit TrustedImmPtr(int value)
            : m_value(0)
        {
            ASSERT_UNUSED(value, !value);
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        const void* m_value;
    };

    struct ImmPtr :
#if ENABLE(JIT_CONSTANT_BLINDING)
        private TrustedImmPtr
#else
        public TrustedImmPtr
#endif
    {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    };

    // TrustedImm32:
    //
    // A 32-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
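    //
    // For illustration (the add32() calls are hypothetical): TrustedImm32 is for values the
    // compiler controls, whereas Imm32 (below) is for values that may be influenced by the
    // program being compiled, and so may be blinded when ENABLE(JIT_CONSTANT_BLINDING) is set:
    //
    //     add32(TrustedImm32(8), ptrReg);          // constant chosen by the JIT itself
    //     add32(Imm32(userSuppliedInt), valueReg); // constant derived from untrusted input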
    struct TrustedImm32 {
        TrustedImm32() { }

        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

#if !CPU(X86_64)
        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };


    struct Imm32 :
#if ENABLE(JIT_CONSTANT_BLINDING)
        private TrustedImm32
#else
        public TrustedImm32
#endif
    {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }
#if !CPU(X86_64)
        explicit Imm32(TrustedImmPtr ptr)
            : TrustedImm32(ptr)
        {
        }
#endif
        const TrustedImm32& asTrustedImm32() const { return *this; }

    };

    // TrustedImm64:
    //
    // A 64-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm64 {
        TrustedImm64() { }

        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int64_t m_value;
    };

    struct Imm64 :
#if ENABLE(JIT_CONSTANT_BLINDING)
        private TrustedImm64
#else
        public TrustedImm64
#endif
    {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }
#if CPU(X86_64)
        explicit Imm64(TrustedImmPtr ptr)
            : TrustedImm64(ptr)
        {
        }
#endif
        const TrustedImm64& asTrustedImm64() const { return *this; }
    };

    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation.  For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.


    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
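    //
    // For illustration (the jump() call is hypothetical; linkTo() is defined on Jump below):
    //
    //     Label loopHead = label();      // mark the top of a loop
    //     ...                            // emit the loop body
    //     jump().linkTo(loopHead, this); // emit a jump and bind it back to the label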
    class Label {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend struct DFG::OSRExit;
        friend class Jump;
        friend class JumpReplacementWatchpoint;
        friend class MacroAssemblerCodeRef;
        friend class LinkBuffer;
        friend class Watchpoint;

    public:
        Label()
        {
        }

        Label(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    // loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    // addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };

    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
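    //
    // For illustration (moveWithPatch() and the LinkBuffer patch() call are hypothetical,
    // and only available on ports that support patchable pointer moves):
    //
    //     DataLabelPtr pointerSlot = moveWithPatch(TrustedImmPtr(0), destReg);
    //     ...
    //     patchBuffer.patch(pointerSlot, realPointer); // fill in the real value at link time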
    class DataLabelPtr {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };

    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a 32-bit value
    // to be patched after the code has been generated.
    class DataLabel32 {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabel32()
        {
        }

        DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // DataLabelCompact:
    //
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;
    public:
        DataLabelCompact()
        {
        }

        DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

    private:
        AssemblerLabel m_label;
    };

    // Call:
    //
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
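    //
    // For illustration (nearCall() and the LinkBuffer link() call are hypothetical):
    //
    //     Call call = nearCall();                      // plant the call instruction
    //     ...
    //     patchBuffer.link(call, FunctionPtr(target)); // bind it to its target at link time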
    class Call {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;

    public:
        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            LinkableNear = 0x3,
        };

        Call()
            : m_flags(None)
        {
        }

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    private:
        Flags m_flags;
    };

    // Jump:
    //
    // A Jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
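    //
    // For illustration (branch32() is a hypothetical conditional branch provided by the
    // concrete MacroAssembler port):
    //
    //     Jump isZero = branch32(Equal, valueReg, TrustedImm32(0)); // forward branch
    //     ...                                                       // not-taken path
    //     isZero.link(this);                                        // bind the branch here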
    class Jump {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class Call;
        friend struct DFG::OSRExit;
        friend class LinkBuffer;
    public:
        Jump()
        {
        }

#if CPU(ARM_THUMB2)
        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(SH4)
        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
            : m_label(jmp)
            , m_type(type)
        {
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif

        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }

        void link(AbstractMacroAssembler<AssemblerType>* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(SH4)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }

        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#endif
#if CPU(SH4)
        SH4Assembler::JumpType m_type;
#endif
    };

    struct PatchableJump {
        PatchableJump()
        {
        }

        explicit PatchableJump(Jump jump)
            : m_jump(jump)
        {
        }

        operator Jump&() { return m_jump; }

        Jump m_jump;
    };

    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
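    //
    // For illustration (the branch32() calls are hypothetical):
    //
    //     JumpList slowCases;
    //     slowCases.append(branch32(Equal, valueReg, TrustedImm32(0)));
    //     slowCases.append(branch32(LessThan, valueReg, TrustedImm32(-1)));
    //     ...
    //     slowCases.link(this); // every collected jump is bound to this point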
    class JumpList {
        friend class LinkBuffer;

    public:
        typedef Vector<Jump, 2> JumpVector;

        JumpList() { }

        JumpList(Jump jump)
        {
            append(jump);
        }

        void link(AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
            m_jumps.clear();
        }

        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
            m_jumps.clear();
        }

        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty()
        {
            return !m_jumps.size();
        }

        void clear()
        {
            m_jumps.clear();
        }

        const JumpVector& jumps() const { return m_jumps; }

    private:
        JumpVector m_jumps;
    };


    // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    Label label()
    {
        return Label(this);
    }

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        (void)label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    // Records the code offsets at which register allocation decisions are made, so that
    // branches can later be checked for unsafely jumping over such a point.
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void check(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].check(offset1, offset2);
    }
#endif

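    // differenceBetween() returns the byte distance between two labels in the emitted code.
    // For illustration (a hypothetical measurement of emitted code size):
    //
    //     Label start = label();
    //     ...                    // emit some instructions
    //     ptrdiff_t bytes = differenceBetween(start, label());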
    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
    {
        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
    }

    unsigned debugOffset() { return m_assembler.debugOffset(); }

    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }
protected:
    AbstractMacroAssembler()
        : m_randomSource(cryptographicallyRandomNumber())
    {
    }

    AssemblerType m_assembler;

    uint32_t random()
    {
        return m_randomSource.getUint32();
    }

    WeakRandom m_randomSource;

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif

#if ENABLE(JIT_CONSTANT_BLINDING)
    static bool scratchRegisterForBlinding() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return true; }
    static bool shouldBlindForSpecificArch(uint64_t) { return true; }
#endif

    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return AssemblerType::getRelocatedAddress(code, label);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }

    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
    {
        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    static void replaceWithLoad(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // AbstractMacroAssembler_h