/*
 * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
 * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved.
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef MacroAssemblerSH4_h
#define MacroAssemblerSH4_h

#if ENABLE(ASSEMBLER) && CPU(SH4)

#include "SH4Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/Assertions.h>

namespace JSC {

class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler> {
public:
    typedef SH4Assembler::FPRegisterID FPRegisterID;

    static const Scale ScalePtr = TimesFour;
    static const FPRegisterID fscratch = SH4Registers::fr10;
    static const RegisterID stackPointerRegister = SH4Registers::sp;
    static const RegisterID linkRegister = SH4Registers::pr;
    static const RegisterID scratchReg3 = SH4Registers::r13;

    static const int MaximumCompactPtrAlignedAddressOffset = 60;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return (value >= 0) && (value <= MaximumCompactPtrAlignedAddressOffset);
    }

    enum RelationalCondition {
        Equal = SH4Assembler::EQ,
        NotEqual = SH4Assembler::NE,
        Above = SH4Assembler::HI,
        AboveOrEqual = SH4Assembler::HS,
        Below = SH4Assembler::LI,
        BelowOrEqual = SH4Assembler::LS,
        GreaterThan = SH4Assembler::GT,
        GreaterThanOrEqual = SH4Assembler::GE,
        LessThan = SH4Assembler::LT,
        LessThanOrEqual = SH4Assembler::LE
    };

    enum ResultCondition {
        Overflow = SH4Assembler::OF,
        Signed = SH4Assembler::SI,
        PositiveOrZero = SH4Assembler::NS,
        Zero = SH4Assembler::EQ,
        NonZero = SH4Assembler::NE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = SH4Assembler::EQ,
        DoubleNotEqual = SH4Assembler::NE,
        DoubleGreaterThan = SH4Assembler::GT,
        DoubleGreaterThanOrEqual = SH4Assembler::GE,
        DoubleLessThan = SH4Assembler::LT,
        DoubleLessThanOrEqual = SH4Assembler::LE,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = SH4Assembler::EQU,
        DoubleNotEqualOrUnordered = SH4Assembler::NEU,
        DoubleGreaterThanOrUnordered = SH4Assembler::GTU,
        DoubleGreaterThanOrEqualOrUnordered = SH4Assembler::GEU,
        DoubleLessThanOrUnordered = SH4Assembler::LTU,
        DoubleLessThanOrEqualOrUnordered = SH4Assembler::LEU,
    };

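    // Scratch register management: claimScratch() borrows a temporary register
    // from the underlying SH4Assembler, and every claim must be paired with a
    // releaseScratch() once the value is no longer needed.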
    RegisterID claimScratch()
    {
        return m_assembler.claimScratch();
    }

    void releaseScratch(RegisterID reg)
    {
        m_assembler.releaseScratch(reg);
    }

    // Integer arithmetic operations

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addlRegReg(src, dest);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        if (!imm.m_value)
            return;

        if (m_assembler.isImmediate(imm.m_value)) {
            m_assembler.addlImm8r(imm.m_value, dest);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm.m_value, scr);
        m_assembler.addlRegReg(scr, dest);
        releaseScratch(scr);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movlRegReg(src, dest);
        add32(imm, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        if (!imm.m_value)
            return;

        RegisterID scr = claimScratch();
        load32(address, scr);
        add32(imm, scr);
        store32(scr, address);
        releaseScratch(scr);
    }

    void add32(Address src, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        load32(src, scr);
        m_assembler.addlRegReg(scr, dest);
        releaseScratch(scr);
    }

    void add32(AbsoluteAddress src, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        load32(src.m_ptr, scr);
        m_assembler.addlRegReg(scr, dest);
        releaseScratch(scr);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andlRegReg(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
            m_assembler.andlImm8r(imm.m_value, dest);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm.m_value, scr);
        m_assembler.andlRegReg(scr, dest);
        releaseScratch(scr);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (src != dest) {
            move(imm, dest);
            and32(src, dest);
            return;
        }

        and32(imm, dest);
    }

    void lshift32(RegisterID shiftamount, RegisterID dest)
    {
        RegisterID shiftTmp = claimScratch();
        m_assembler.loadConstant(0x1f, shiftTmp);
        m_assembler.andlRegReg(shiftamount, shiftTmp);
        m_assembler.shldRegReg(dest, shiftTmp);
        releaseScratch(shiftTmp);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        int immMasked = imm.m_value & 0x1f;
        if (!immMasked)
            return;

        if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) {
            m_assembler.shllImm8r(immMasked, dest);
            return;
        }

        RegisterID shiftTmp = claimScratch();
        m_assembler.loadConstant(immMasked, shiftTmp);
        m_assembler.shldRegReg(dest, shiftTmp);
        releaseScratch(shiftTmp);
    }

    void lshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);

        lshift32(shiftamount, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imullRegReg(src, dest);
        m_assembler.stsmacl(dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        move(imm, scr);
        if (src != dest)
            move(src, dest);
        mul32(scr, dest);
        releaseScratch(scr);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orlRegReg(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) {
            m_assembler.orlImm8r(imm.m_value, dest);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm.m_value, scr);
        m_assembler.orlRegReg(scr, dest);
        releaseScratch(scr);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (src != dest) {
            move(imm, dest);
            or32(src, dest);
            return;
        }

        or32(imm, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (src != dest) {
            move(imm, dest);
            xor32(src, dest);
            return;
        }

        xor32(imm, dest);
    }

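    // SHAD shifts left for a positive amount and arithmetic-right for a negative
    // one, so the right-shift helpers mask the amount to 5 bits and negate it.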
    void rshift32(RegisterID shiftamount, RegisterID dest)
    {
        RegisterID shiftTmp = claimScratch();
        m_assembler.loadConstant(0x1f, shiftTmp);
        m_assembler.andlRegReg(shiftamount, shiftTmp);
        m_assembler.neg(shiftTmp, shiftTmp);
        m_assembler.shadRegReg(dest, shiftTmp);
        releaseScratch(shiftTmp);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        int immMasked = imm.m_value & 0x1f;
        if (!immMasked)
            return;

        if (immMasked == 1) {
            m_assembler.sharImm8r(immMasked, dest);
            return;
        }

        RegisterID shiftTmp = claimScratch();
        m_assembler.loadConstant(-immMasked, shiftTmp);
        m_assembler.shadRegReg(dest, shiftTmp);
        releaseScratch(shiftTmp);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sublRegReg(src, dest);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        if (!imm.m_value)
            return;

        RegisterID result = claimScratch();
        RegisterID scratchReg = claimScratch();

        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
        m_assembler.movlMemReg(scratchReg, result);

        if (m_assembler.isImmediate(-imm.m_value))
            m_assembler.addlImm8r(-imm.m_value, result);
        else {
            m_assembler.loadConstant(imm.m_value, scratchReg3);
            m_assembler.sublRegReg(scratchReg3, result);
        }

        store32(result, scratchReg);
        releaseScratch(result);
        releaseScratch(scratchReg);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        if (!imm.m_value)
            return;

        RegisterID result = claimScratch();
        RegisterID scratchReg = claimScratch();

        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scratchReg);
        m_assembler.movlMemReg(scratchReg, result);

        if (m_assembler.isImmediate(imm.m_value))
            m_assembler.addlImm8r(imm.m_value, result);
        else {
            m_assembler.loadConstant(imm.m_value, scratchReg3);
            m_assembler.addlRegReg(scratchReg3, result);
        }

        store32(result, scratchReg);
        releaseScratch(result);
        releaseScratch(scratchReg);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        RegisterID scr1 = claimScratch();
        RegisterID scr2 = claimScratch();

        // Add 32-bit LSB first.
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scr1);
        m_assembler.movlMemReg(scr1, scr1); // scr1 = 32-bit LSB of int64 @ address
        m_assembler.loadConstant(imm.m_value, scr2);
        m_assembler.clrt();
        m_assembler.addclRegReg(scr1, scr2);
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr), scr1);
        m_assembler.movlRegMem(scr2, scr1); // Update address with 32-bit LSB result.

        // Then add 32-bit MSB.
        m_assembler.addlImm8r(4, scr1);
        m_assembler.movlMemReg(scr1, scr1); // scr1 = 32-bit MSB of int64 @ address
        m_assembler.movt(scr2);
        if (imm.m_value < 0)
            m_assembler.addlImm8r(-1, scr2); // Sign extend imm value if needed.
        m_assembler.addvlRegReg(scr2, scr1);
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address.m_ptr) + 4, scr2);
        m_assembler.movlRegMem(scr1, scr2); // Update (address + 4) with 32-bit MSB result.

        releaseScratch(scr2);
        releaseScratch(scr1);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (!imm.m_value)
            return;

        if (m_assembler.isImmediate(-imm.m_value)) {
            m_assembler.addlImm8r(-imm.m_value, dest);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm.m_value, scr);
        m_assembler.sublRegReg(scr, dest);
        releaseScratch(scr);
    }

    void sub32(Address src, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        load32(src, scr);
        m_assembler.sublRegReg(scr, dest);
        releaseScratch(scr);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorlRegReg(src, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID srcDest)
    {
        if (imm.m_value == -1) {
            m_assembler.notlReg(srcDest, srcDest);
            return;
        }

        if ((srcDest != SH4Registers::r0) || (imm.m_value > 255) || (imm.m_value < 0)) {
            RegisterID scr = claimScratch();
            m_assembler.loadConstant(imm.m_value, scr);
            m_assembler.xorlRegReg(scr, srcDest);
            releaseScratch(scr);
            return;
        }

        m_assembler.xorlImm8r(imm.m_value, srcDest);
    }

    void compare32(int imm, RegisterID dst, RelationalCondition cond)
    {
        if (((cond == Equal) || (cond == NotEqual)) && (dst == SH4Registers::r0) && m_assembler.isImmediate(imm)) {
            m_assembler.cmpEqImmR0(imm, dst);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm, scr);
        m_assembler.cmplRegReg(scr, dst, SH4Condition(cond));
        releaseScratch(scr);
    }

    void compare32(int offset, RegisterID base, RegisterID left, RelationalCondition cond)
    {
        RegisterID scr = claimScratch();
        if (!offset) {
            m_assembler.movlMemReg(base, scr);
            m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
            releaseScratch(scr);
            return;
        }

        if ((offset < 0) || (offset >= 64)) {
            m_assembler.loadConstant(offset, scr);
            m_assembler.addlRegReg(base, scr);
            m_assembler.movlMemReg(scr, scr);
            m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
            releaseScratch(scr);
            return;
        }

        m_assembler.movlMemReg(offset >> 2, base, scr);
        m_assembler.cmplRegReg(scr, left, SH4Condition(cond));
        releaseScratch(scr);
    }

    void testImm(int imm, int offset, RegisterID base)
    {
        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();

        if ((offset < 0) || (offset >= 64)) {
            m_assembler.loadConstant(offset, scr);
            m_assembler.addlRegReg(base, scr);
            m_assembler.movlMemReg(scr, scr);
        } else if (offset)
            m_assembler.movlMemReg(offset >> 2, base, scr);
        else
            m_assembler.movlMemReg(base, scr);
        if (m_assembler.isImmediate(imm))
            m_assembler.movImm8(imm, scr1);
        else
            m_assembler.loadConstant(imm, scr1);

        m_assembler.testlRegReg(scr, scr1);
        releaseScratch(scr);
        releaseScratch(scr1);
    }

    void testlImm(int imm, RegisterID dst)
    {
        if ((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0)) {
            m_assembler.testlImm8r(imm, dst);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm, scr);
        m_assembler.testlRegReg(scr, dst);
        releaseScratch(scr);
    }

    void compare32(RegisterID right, int offset, RegisterID base, RelationalCondition cond)
    {
        if (!offset) {
            RegisterID scr = claimScratch();
            m_assembler.movlMemReg(base, scr);
            m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
            releaseScratch(scr);
            return;
        }

        if ((offset < 0) || (offset >= 64)) {
            RegisterID scr = claimScratch();
            m_assembler.loadConstant(offset, scr);
            m_assembler.addlRegReg(base, scr);
            m_assembler.movlMemReg(scr, scr);
            m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
            releaseScratch(scr);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.movlMemReg(offset >> 2, base, scr);
        m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
        releaseScratch(scr);
    }

    void compare32(int imm, int offset, RegisterID base, RelationalCondition cond)
    {
        if (!offset) {
            RegisterID scr = claimScratch();
            RegisterID scr1 = claimScratch();
            m_assembler.movlMemReg(base, scr);
            m_assembler.loadConstant(imm, scr1);
            m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
            releaseScratch(scr1);
            releaseScratch(scr);
            return;
        }

        if ((offset < 0) || (offset >= 64)) {
            RegisterID scr = claimScratch();
            RegisterID scr1 = claimScratch();
            m_assembler.loadConstant(offset, scr);
            m_assembler.addlRegReg(base, scr);
            m_assembler.movlMemReg(scr, scr);
            m_assembler.loadConstant(imm, scr1);
            m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
            releaseScratch(scr1);
            releaseScratch(scr);
            return;
        }

        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();
        m_assembler.movlMemReg(offset >> 2, base, scr);
        m_assembler.loadConstant(imm, scr1);
        m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
        releaseScratch(scr1);
        releaseScratch(scr);
    }

    // Memory access operations
588
589    void load32(ImplicitAddress address, RegisterID dest)
590    {
591        load32(address.base, address.offset, dest);
592    }
593
594    void load8(ImplicitAddress address, RegisterID dest)
595    {
596        load8(address.base, address.offset, dest);
597    }
598
599    void load8(BaseIndex address, RegisterID dest)
600    {
601        RegisterID scr = claimScratch();
602        move(address.index, scr);
603        lshift32(TrustedImm32(address.scale), scr);
604        add32(address.base, scr);
605        load8(scr, address.offset, dest);
606        releaseScratch(scr);
607    }
608
609    void load8PostInc(RegisterID base, RegisterID dest)
610    {
611        m_assembler.movbMemRegIn(base, dest);
612        m_assembler.extub(dest, dest);
613    }
614
615    void load8Signed(BaseIndex address, RegisterID dest)
616    {
617        RegisterID scr = claimScratch();
618        move(address.index, scr);
619        lshift32(TrustedImm32(address.scale), scr);
620        add32(address.base, scr);
621        load8Signed(scr, address.offset, dest);
622        releaseScratch(scr);
623    }
624
625    void load32(BaseIndex address, RegisterID dest)
626    {
627        RegisterID scr = claimScratch();
628        move(address.index, scr);
629        lshift32(TrustedImm32(address.scale), scr);
630        add32(address.base, scr);
631        load32(scr, address.offset, dest);
632        releaseScratch(scr);
633    }
634
635    void load32(const void* address, RegisterID dest)
636    {
637        m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address)), dest);
638        m_assembler.movlMemReg(dest, dest);
639    }
640
641    void load32(RegisterID base, int offset, RegisterID dest)
642    {
643        if (!offset) {
644            m_assembler.movlMemReg(base, dest);
645            return;
646        }
647
648        if ((offset >= 0) && (offset < 64)) {
649            m_assembler.movlMemReg(offset >> 2, base, dest);
650            return;
651        }
652
653        RegisterID scr = (dest == base) ? claimScratch() : dest;
654
655        m_assembler.loadConstant(offset, scr);
656        if (base == SH4Registers::r0)
657            m_assembler.movlR0mr(scr, dest);
658        else {
659            m_assembler.addlRegReg(base, scr);
660            m_assembler.movlMemReg(scr, dest);
661        }
662
663        if (dest == base)
664            releaseScratch(scr);
665    }
666
667    void load8Signed(RegisterID base, int offset, RegisterID dest)
668    {
669        if (!offset) {
670            m_assembler.movbMemReg(base, dest);
671            return;
672        }
673
674        if ((offset > 0) && (offset <= 15) && (dest == SH4Registers::r0)) {
675            m_assembler.movbMemReg(offset, base, dest);
676            return;
677        }
678
679        RegisterID scr = (dest == base) ? claimScratch() : dest;
680
681        m_assembler.loadConstant(offset, scr);
682        if (base == SH4Registers::r0)
683            m_assembler.movbR0mr(scr, dest);
684        else {
685            m_assembler.addlRegReg(base, scr);
686            m_assembler.movbMemReg(scr, dest);
687        }
688
689        if (dest == base)
690            releaseScratch(scr);
691    }
692
693    void load8(RegisterID base, int offset, RegisterID dest)
694    {
695        load8Signed(base, offset, dest);
696        m_assembler.extub(dest, dest);
697    }
698
699    void load32(RegisterID src, RegisterID dst)
700    {
701        m_assembler.movlMemReg(src, dst);
702    }
703
704    void load16(ImplicitAddress address, RegisterID dest)
705    {
706        if (!address.offset) {
707            m_assembler.movwMemReg(address.base, dest);
708            m_assembler.extuw(dest, dest);
709            return;
710        }
711
712        if ((address.offset > 0) && (address.offset <= 30) && (dest == SH4Registers::r0)) {
713            m_assembler.movwMemReg(address.offset >> 1, address.base, dest);
714            m_assembler.extuw(dest, dest);
715            return;
716        }
717
718        RegisterID scr = (dest == address.base) ? claimScratch() : dest;
719
720        m_assembler.loadConstant(address.offset, scr);
721        if (address.base == SH4Registers::r0)
722            m_assembler.movwR0mr(scr, dest);
723        else {
724            m_assembler.addlRegReg(address.base, scr);
725            m_assembler.movwMemReg(scr, dest);
726        }
727        m_assembler.extuw(dest, dest);
728
729        if (dest == address.base)
730            releaseScratch(scr);
731    }
732
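    // Assembles the halfword from two byte loads so a misaligned address never
    // faults; the first byte read becomes the low-order byte of the result.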
    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);

        if (address.offset)
            add32(TrustedImm32(address.offset), scr);

        add32(address.base, scr);
        load8PostInc(scr, scr1);
        load8(scr, dest);
        m_assembler.shllImm8r(8, dest);
        or32(scr1, dest);

        releaseScratch(scr);
        releaseScratch(scr1);
    }

    void load16(RegisterID src, RegisterID dest)
    {
        m_assembler.movwMemReg(src, dest);
        m_assembler.extuw(dest, dest);
    }

    void load16Signed(RegisterID src, RegisterID dest)
    {
        m_assembler.movwMemReg(src, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        load16Signed(address, dest);
        m_assembler.extuw(dest, dest);
    }

    void load16PostInc(RegisterID base, RegisterID dest)
    {
        m_assembler.movwMemRegIn(base, dest);
        m_assembler.extuw(dest, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);

        if (address.offset)
            add32(TrustedImm32(address.offset), scr);

        if (address.base == SH4Registers::r0)
            m_assembler.movwR0mr(scr, dest);
        else {
            add32(address.base, scr);
            load16Signed(scr, dest);
        }

        releaseScratch(scr);
    }

    void store8(RegisterID src, BaseIndex address)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(TrustedImm32(address.offset), scr);

        if (address.base == SH4Registers::r0)
            m_assembler.movbRegMemr0(src, scr);
        else {
            add32(address.base, scr);
            m_assembler.movbRegMem(src, scr);
        }

        releaseScratch(scr);
    }

    void store16(RegisterID src, BaseIndex address)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(TrustedImm32(address.offset), scr);

        if (address.base == SH4Registers::r0)
            m_assembler.movwRegMemr0(src, scr);
        else {
            add32(address.base, scr);
            m_assembler.movwRegMem(src, scr);
        }

        releaseScratch(scr);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        if (!address.offset) {
            m_assembler.movlRegMem(src, address.base);
            return;
        }

        if ((address.offset >= 0) && (address.offset < 64)) {
            m_assembler.movlRegMem(src, address.offset >> 2, address.base);
            return;
        }

        RegisterID scr = claimScratch();
        m_assembler.loadConstant(address.offset, scr);
        if (address.base == SH4Registers::r0)
            m_assembler.movlRegMemr0(src, scr);
        else {
            m_assembler.addlRegReg(address.base, scr);
            m_assembler.movlRegMem(src, scr);
        }
        releaseScratch(scr);
    }

    void store32(RegisterID src, RegisterID dst)
    {
        m_assembler.movlRegMem(src, dst);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        RegisterID scr = claimScratch();
        m_assembler.loadConstant(imm.m_value, scr);
        store32(scr, address);
        releaseScratch(scr);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);
        store32(src, Address(scr, address.offset));

        releaseScratch(scr);
    }

    void store32(TrustedImm32 imm, void* address)
    {
        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();
        m_assembler.loadConstant(imm.m_value, scr);
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr1);
        m_assembler.movlRegMem(scr, scr1);
        releaseScratch(scr);
        releaseScratch(scr1);
    }

    void store32(RegisterID src, void* address)
    {
        RegisterID scr = claimScratch();
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr);
        m_assembler.movlRegMem(src, scr);
        releaseScratch(scr);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        DataLabel32 label(this);
        m_assembler.loadConstantUnReusable(address.offset, scr);
        m_assembler.addlRegReg(address.base, scr);
        m_assembler.movlMemReg(scr, dest);
        releaseScratch(scr);
        return label;
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        RegisterID scr = claimScratch();
        DataLabel32 label(this);
        m_assembler.loadConstantUnReusable(address.offset, scr);
        m_assembler.addlRegReg(address.base, scr);
        m_assembler.movlRegMem(src, scr);
        releaseScratch(scr);
        return label;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabelCompact dataLabel(this);
        ASSERT(address.offset <= MaximumCompactPtrAlignedAddressOffset);
        ASSERT(address.offset >= 0);
        m_assembler.movlMemRegCompact(address.offset >> 2, address.base, dest);
        return dataLabel;
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);

        RegisterID scr = claimScratch();
        m_assembler.movImm8(address.offset, scr);
        m_assembler.addlRegReg(address.base, scr);
        m_assembler.movlMemReg(scr, dest);
        releaseScratch(scr);

        return result;
    }

    // Floating-point operations

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }

    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.fldsfpul((FPRegisterID)(src + 1));
        m_assembler.stsfpulReg(dest1);
        m_assembler.fldsfpul(src);
        m_assembler.stsfpulReg(dest2);
    }

    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
    {
        UNUSED_PARAM(scratch);
        m_assembler.ldsrmfpul(src1);
        m_assembler.fstsfpul((FPRegisterID)(dest + 1));
        m_assembler.ldsrmfpul(src2);
        m_assembler.fstsfpul(dest);
    }

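    // Double values occupy a pair of single-precision registers, so the helpers
    // below move, load and store (reg) and (reg + 1) as two 32-bit halves.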
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        if (src != dest) {
            m_assembler.fmovsRegReg((FPRegisterID)(src + 1), (FPRegisterID)(dest + 1));
            m_assembler.fmovsRegReg(src, dest);
        }
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);
        if (address.offset)
            add32(TrustedImm32(address.offset), scr);

        m_assembler.fmovsReadrm(scr, dest);
        releaseScratch(scr);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);
        if (address.offset)
            add32(TrustedImm32(address.offset), scr);

        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
        m_assembler.fmovsReadrm(scr, dest);
        releaseScratch(scr);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID scr = claimScratch();

        m_assembler.loadConstant(address.offset, scr);
        if (address.base == SH4Registers::r0) {
            m_assembler.fmovsReadr0r(scr, (FPRegisterID)(dest + 1));
            m_assembler.addlImm8r(4, scr);
            m_assembler.fmovsReadr0r(scr, dest);
            releaseScratch(scr);
            return;
        }

        m_assembler.addlRegReg(address.base, scr);
        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
        m_assembler.fmovsReadrm(scr, dest);
        releaseScratch(scr);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        RegisterID scr = claimScratch();
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(address), scr);
        m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1));
        m_assembler.fmovsReadrm(scr, dest);
        releaseScratch(scr);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);
        if (address.offset)
            add32(TrustedImm32(address.offset), scr);

        m_assembler.fmovsWriterm(src, scr);

        releaseScratch(scr);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID scr = claimScratch();
        m_assembler.loadConstant(address.offset + 8, scr);
        m_assembler.addlRegReg(address.base, scr);
        m_assembler.fmovsWriterndec(src, scr);
        m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);
        releaseScratch(scr);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);
        add32(TrustedImm32(address.offset + 8), scr);

        m_assembler.fmovsWriterndec(src, scr);
        m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr);

        releaseScratch(scr);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.daddRegReg(src, dest);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(address.m_ptr, fscratch);
        addDouble(fscratch, dest);
    }

    void addDouble(Address address, FPRegisterID dest)
    {
        loadDouble(address, fscratch);
        addDouble(fscratch, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.dsubRegReg(src, dest);
    }

    void subDouble(Address address, FPRegisterID dest)
    {
        loadDouble(address, fscratch);
        subDouble(fscratch, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.dmulRegReg(src, dest);
    }

    void mulDouble(Address address, FPRegisterID dest)
    {
        loadDouble(address, fscratch);
        mulDouble(fscratch, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.ddivRegReg(src, dest);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.fldsfpul(src);
        m_assembler.dcnvsd(dst);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.dcnvds(src);
        m_assembler.fstsfpul(dst);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.ldsrmfpul(src);
        m_assembler.floatfpulDreg(dest);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        RegisterID scr = claimScratch();
        load32(src.m_ptr, scr);
        convertInt32ToDouble(scr, dest);
        releaseScratch(scr);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        RegisterID scr = claimScratch();
        load32(src, scr);
        convertInt32ToDouble(scr, dest);
        releaseScratch(scr);
    }

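    // Loads a 32-bit value that may not be 4-byte aligned: a word-aligned
    // address uses a single load, a halfword-aligned one is built from two
    // 16-bit loads, and anything else falls back to byte-by-byte assembly.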
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        RegisterID scr = claimScratch();
        RegisterID scr1 = claimScratch();
        Jump m_jump;
        JumpList end;

        if (dest != SH4Registers::r0)
            move(SH4Registers::r0, scr1);

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);

        if (address.offset)
            add32(TrustedImm32(address.offset), scr);

        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 58, sizeof(uint32_t));
        move(scr, SH4Registers::r0);
        m_assembler.testlImm8r(0x3, SH4Registers::r0);
        m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);

        if (dest != SH4Registers::r0)
            move(scr1, SH4Registers::r0);

        load32(scr, dest);
        end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
        m_assembler.nop();
        m_jump.link(this);
        m_assembler.testlImm8r(0x1, SH4Registers::r0);

        if (dest != SH4Registers::r0)
            move(scr1, SH4Registers::r0);

        m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear);
        load16PostInc(scr, scr1);
        load16(scr, dest);
        m_assembler.shllImm8r(16, dest);
        or32(scr1, dest);
        end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear));
        m_assembler.nop();
        m_jump.link(this);
        load8PostInc(scr, scr1);
        load16PostInc(scr, dest);
        m_assembler.shllImm8r(8, dest);
        or32(dest, scr1);
        load8(scr, dest);
        m_assembler.shllImm8r(8, dest);
        m_assembler.shllImm8r(16, dest);
        or32(scr1, dest);
        end.link(this);

        releaseScratch(scr);
        releaseScratch(scr1);
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        RegisterID scr = scratchReg3;
        load32WithUnalignedHalfWords(left, scr);
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testlRegReg(scr, scr);
        else
            compare32(right.m_value, scr, cond);

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.movImm8(0, scratchReg3);
        convertInt32ToDouble(scratchReg3, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.movImm8(0, scratchReg3);
        convertInt32ToDouble(scratchReg3, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

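    // Several cases below first compare each operand with itself via dcmppeq;
    // that test is false only for NaN, which is how unordered inputs are routed
    // to (or away from) the taken branch.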
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        if (cond == DoubleEqual) {
            m_assembler.dcmppeq(right, left);
            return branchTrue();
        }

        if (cond == DoubleNotEqual) {
            JumpList end;
            m_assembler.dcmppeq(left, left);
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(right, right);
            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(right, left);
            Jump m_jump = branchFalse();
            end.link(this);
            return m_jump;
        }

        if (cond == DoubleGreaterThan) {
            m_assembler.dcmppgt(right, left);
            return branchTrue();
        }

        if (cond == DoubleGreaterThanOrEqual) {
            JumpList end;
            m_assembler.dcmppeq(left, left);
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(right, right);
            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppgt(left, right);
            Jump m_jump = branchFalse();
            end.link(this);
            return m_jump;
        }

        if (cond == DoubleLessThan) {
            m_assembler.dcmppgt(left, right);
            return branchTrue();
        }

        if (cond == DoubleLessThanOrEqual) {
            JumpList end;
            m_assembler.dcmppeq(left, left);
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(right, right);
            end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppgt(right, left);
            Jump m_jump = branchFalse();
            end.link(this);
            return m_jump;
        }

        if (cond == DoubleEqualOrUnordered) {
            JumpList takeBranch;
            m_assembler.dcmppeq(left, left);
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(right, right);
            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(left, right);
            Jump m_jump = Jump(m_assembler.je());
            takeBranch.link(this);
            m_assembler.extraInstrForBranch(scratchReg3);
            return m_jump;
        }

        if (cond == DoubleGreaterThanOrUnordered) {
            JumpList takeBranch;
            m_assembler.dcmppeq(left, left);
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(right, right);
            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppgt(right, left);
            Jump m_jump = Jump(m_assembler.je());
            takeBranch.link(this);
            m_assembler.extraInstrForBranch(scratchReg3);
            return m_jump;
        }

        if (cond == DoubleGreaterThanOrEqualOrUnordered) {
            m_assembler.dcmppgt(left, right);
            return branchFalse();
        }

        if (cond == DoubleLessThanOrUnordered) {
            JumpList takeBranch;
            m_assembler.dcmppeq(left, left);
            m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t));
            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppeq(right, right);
            takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear));
            m_assembler.dcmppgt(left, right);
            Jump m_jump = Jump(m_assembler.je());
            takeBranch.link(this);
            m_assembler.extraInstrForBranch(scratchReg3);
            return m_jump;
        }

        if (cond == DoubleLessThanOrEqualOrUnordered) {
            m_assembler.dcmppgt(right, left);
            return branchFalse();
        }

        ASSERT(cond == DoubleNotEqualOrUnordered);
        m_assembler.dcmppeq(right, left);
        return branchFalse();
    }

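    // branchTrue()/branchFalse() emit a conditional branch on the T bit and
    // reserve extra instructions (via extraInstrForBranch with scratchReg3) so
    // the jump can later be relinked to the far-branch form sketched in the
    // BT/BF comment inside branch32() below.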
    Jump branchTrue()
    {
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
        Jump m_jump = Jump(m_assembler.je());
        m_assembler.extraInstrForBranch(scratchReg3);
        return m_jump;
    }

    Jump branchFalse()
    {
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t));
        Jump m_jump = Jump(m_assembler.jne());
        m_assembler.extraInstrForBranch(scratchReg3);
        return m_jump;
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        RegisterID scr = claimScratch();
        move(left.index, scr);
        lshift32(TrustedImm32(left.scale), scr);
        add32(left.base, scr);
        load32(scr, left.offset, scr);
        compare32(right.m_value, scr, cond);
        releaseScratch(scr);

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        moveDouble(src, dest);
        m_assembler.dsqrt(dest);
    }

    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        moveDouble(src, dest);
        m_assembler.dabs(dest);
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        RegisterID addressTempRegister = claimScratch();
        load8(address, addressTempRegister);
        Jump jmp = branchTest32(cond, addressTempRegister, mask);
        releaseScratch(addressTempRegister);
        return jmp;
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        RegisterID addressTempRegister = claimScratch();
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        load8(Address(addressTempRegister), addressTempRegister);
        Jump jmp = branchTest32(cond, addressTempRegister, mask);
        releaseScratch(addressTempRegister);
        return jmp;
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        RegisterID addressTempRegister = claimScratch();
        load8(left, addressTempRegister);
        Jump jmp = branch32(cond, addressTempRegister, right);
        releaseScratch(addressTempRegister);
        return jmp;
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        RegisterID addressTempRegister = claimScratch();
        load8(left, addressTempRegister);
        compare32(cond, addressTempRegister, right, dest);
        releaseScratch(addressTempRegister);
    }

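    // FTRC saturates out-of-range inputs to 0x7FFFFFFF or 0x80000000, so
    // branchTruncateDoubleToInt32() compares against both values to detect a
    // failed truncation.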
1443    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
1444    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
1445    {
1446        m_assembler.ftrcdrmfpul(src);
1447        m_assembler.stsfpulReg(dest);
1448        m_assembler.loadConstant(0x7fffffff, scratchReg3);
1449        m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
1450        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 14, sizeof(uint32_t));
1451        m_assembler.branch(BT_OPCODE, 2);
1452        m_assembler.addlImm8r(1, scratchReg3);
1453        m_assembler.cmplRegReg(dest, scratchReg3, SH4Condition(Equal));
1454        return (branchType == BranchIfTruncateFailed) ? branchTrue() : branchFalse();
1455    }
1456
1457    // Stack manipulation operations
1458
1459    void pop(RegisterID dest)
1460    {
1461        m_assembler.popReg(dest);
1462    }
1463
1464    void push(RegisterID src)
1465    {
1466        m_assembler.pushReg(src);
1467    }
1468
1469    void push(TrustedImm32 imm)
1470    {
1471        RegisterID scr = claimScratch();
1472        m_assembler.loadConstant(imm.m_value, scr);
1473        push(scr);
1474        releaseScratch(scr);
1475    }
1476
1477    // Register move operations
1478
1479    void move(TrustedImm32 imm, RegisterID dest)
1480    {
1481        m_assembler.loadConstant(imm.m_value, dest);
1482    }
1483
1484    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
1485    {
1486        m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t));
1487        DataLabelPtr dataLabel(this);
1488        m_assembler.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue.m_value), dest);
1489        return dataLabel;
1490    }
1491
1492    void move(RegisterID src, RegisterID dest)
1493    {
1494        if (src != dest)
1495            m_assembler.movlRegReg(src, dest);
1496    }
1497
1498    void move(TrustedImmPtr imm, RegisterID dest)
1499    {
1500        m_assembler.loadConstant(imm.asIntptr(), dest);
1501    }
1502
1503    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
1504    {
1505        m_assembler.cmplRegReg(right, left, SH4Condition(cond));
1506        if (cond != NotEqual) {
1507            m_assembler.movt(dest);
1508            return;
1509        }
1510
1511        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
1512        m_assembler.movImm8(0, dest);
1513        m_assembler.branch(BT_OPCODE, 0);
1514        m_assembler.movImm8(1, dest);
1515    }
1516
1517    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
1518    {
1519        if (left != dest) {
1520            move(right, dest);
1521            compare32(cond, left, dest, dest);
1522            return;
1523        }
1524
1525        RegisterID scr = claimScratch();
1526        move(right, scr);
1527        compare32(cond, left, scr, dest);
1528        releaseScratch(scr);
1529    }
1530
1531    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
1532    {
1533        ASSERT((cond == Zero) || (cond == NonZero));
1534
1535        load8(address, dest);
1536        if (mask.m_value == -1)
1537            compare32(0, dest, static_cast<RelationalCondition>(cond));
1538        else
1539            testlImm(mask.m_value, dest);
1540        if (cond != NonZero) {
1541            m_assembler.movt(dest);
1542            return;
1543        }
1544
1545        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4);
1546        m_assembler.movImm8(0, dest);
1547        m_assembler.branch(BT_OPCODE, 0);
1548        m_assembler.movImm8(1, dest);
1549    }
1550
1551    void loadPtrLinkReg(ImplicitAddress address)
1552    {
1553        RegisterID scr = claimScratch();
1554        load32(address, scr);
1555        m_assembler.ldspr(scr);
1556        releaseScratch(scr);
1557    }
1558
1559    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
1560    {
1561        m_assembler.cmplRegReg(right, left, SH4Condition(cond));
1562        /* BT label => BF off
1563           nop         LDR reg
1564           nop         braf @reg
1565           nop         nop
1566         */
1567        if (cond == NotEqual)
1568            return branchFalse();
1569        return branchTrue();
1570    }
1571
1572    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
1573    {
1574        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
1575            m_assembler.testlRegReg(left, left);
1576        else
1577            compare32(right.m_value, left, cond);
1578
1579        if (cond == NotEqual)
1580            return branchFalse();
1581        return branchTrue();
1582    }
1583
1584    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
1585    {
1586        compare32(right.offset, right.base, left, cond);
1587        if (cond == NotEqual)
1588            return branchFalse();
1589        return branchTrue();
1590    }
1591
1592    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
1593    {
1594        compare32(right, left.offset, left.base, cond);
1595        if (cond == NotEqual)
1596            return branchFalse();
1597        return branchTrue();
1598    }
1599
1600    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
1601    {
1602        compare32(right.m_value, left.offset, left.base, cond);
1603        if (cond == NotEqual)
1604            return branchFalse();
1605        return branchTrue();
1606    }
1607
1608    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
1609    {
1610        RegisterID scr = claimScratch();
1611
1612        load32(left.m_ptr, scr);
1613        m_assembler.cmplRegReg(right, scr, SH4Condition(cond));
1614        releaseScratch(scr);
1615
1616        if (cond == NotEqual)
1617            return branchFalse();
1618        return branchTrue();
1619    }
1620
1621    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
1622    {
1623        RegisterID addressTempRegister = claimScratch();
1624
1625        m_assembler.loadConstant(reinterpret_cast<uint32_t>(left.m_ptr), addressTempRegister);
1626        m_assembler.movlMemReg(addressTempRegister, addressTempRegister);
1627        compare32(right.m_value, addressTempRegister, cond);
1628        releaseScratch(addressTempRegister);
1629
1630        if (cond == NotEqual)
1631            return branchFalse();
1632        return branchTrue();
1633    }
1634
1635    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
1636    {
1637        ASSERT(!(right.m_value & 0xFFFFFF00));
1638        RegisterID scr = claimScratch();
1639
1640        move(left.index, scr);
1641        lshift32(TrustedImm32(left.scale), scr);
1642
1643        if (left.offset)
1644            add32(TrustedImm32(left.offset), scr);
1645        add32(left.base, scr);
1646        load8(scr, scr);
1647        RegisterID scr1 = claimScratch();
1648        m_assembler.loadConstant(right.m_value, scr1);
1649        releaseScratch(scr);
1650        releaseScratch(scr1);
1651
1652        return branch32(cond, scr, scr1);
1653    }
1654
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));

        m_assembler.testlRegReg(reg, mask);

        if (cond == NonZero) // NotEqual
            return branchFalse();
        return branchTrue();
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));

        if (mask.m_value == -1)
            m_assembler.testlRegReg(reg, reg);
        else
            testlImm(mask.m_value, reg);

        if (cond == NonZero) // NotEqual
            return branchFalse();
        return branchTrue();
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));

        if (mask.m_value == -1)
            compare32(0, address.offset, address.base, static_cast<RelationalCondition>(cond));
        else
            testImm(mask.m_value, address.offset, address.base);

        if (cond == NonZero) // NotEqual
            return branchFalse();
        return branchTrue();
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        RegisterID scr = claimScratch();

        move(address.index, scr);
        lshift32(TrustedImm32(address.scale), scr);
        add32(address.base, scr);
        load32(scr, address.offset, scr);

        if (mask.m_value == -1)
            m_assembler.testlRegReg(scr, scr);
        else
            testlImm(mask.m_value, scr);

        releaseScratch(scr);

        if (cond == NonZero) // NotEqual
            return branchFalse();
        return branchTrue();
    }

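    // Unconditional jumps: jump() returns a linkable Jump, jump(RegisterID) jumps to the
    // address held in a register, and jump(Address) first loads the target from memory
    // through a scratch register.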
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmpReg(target);
    }

    void jump(Address address)
    {
        RegisterID scr = claimScratch();

        if ((address.offset < 0) || (address.offset >= 64)) {
            m_assembler.loadConstant(address.offset, scr);
            m_assembler.addlRegReg(address.base, scr);
            m_assembler.movlMemReg(scr, scr);
        } else if (address.offset)
            m_assembler.movlMemReg(address.offset >> 2, address.base, scr);
        else
            m_assembler.movlMemReg(address.base, scr);
        m_assembler.jmpReg(scr);

        releaseScratch(scr);
    }

    // Arithmetic control flow operations

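    // The branchAdd32 overloads perform the addition and then branch on the requested
    // condition: Overflow uses addv (T set on signed overflow), Signed / PositiveOrZero
    // test the sign of the result with cmp/pz, and Zero / NonZero compare the result
    // against zero. Illustrative sketch only (assumes "masm" as above):
    //   Jump overflowed = masm.branchAdd32(MacroAssemblerSH4::Overflow, TrustedImm32(1), SH4Registers::r0);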
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));

        if (cond == Overflow) {
            m_assembler.addvlRegReg(src, dest);
            return branchTrue();
        }

        if (cond == Signed) {
            m_assembler.addlRegReg(src, dest);
            // Check if dest is negative
            m_assembler.cmppz(dest);
            return branchFalse();
        }

        if (cond == PositiveOrZero) {
            m_assembler.addlRegReg(src, dest);
            m_assembler.cmppz(dest);
            return branchTrue();
        }

        m_assembler.addlRegReg(src, dest);
        compare32(0, dest, Equal);

        if (cond == NonZero) // NotEqual
            return branchFalse();
        return branchTrue();
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));

        move(imm, scratchReg3);
        return branchAdd32(cond, scratchReg3, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));

        if (src != dest)
            move(src, dest);

        if (cond == Overflow) {
            move(imm, scratchReg3);
            m_assembler.addvlRegReg(scratchReg3, dest);
            return branchTrue();
        }

        add32(imm, dest);

        if (cond == Signed) {
            m_assembler.cmppz(dest);
            return branchFalse();
        }

        if (cond == PositiveOrZero) {
            m_assembler.cmppz(dest);
            return branchTrue();
        }

        compare32(0, dest, Equal);

        if (cond == NonZero) // NotEqual
            return branchFalse();
        return branchTrue();
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
        bool result;

        move(imm, scratchReg3);
        RegisterID destptr = claimScratch();
        RegisterID destval = claimScratch();
        m_assembler.loadConstant(reinterpret_cast<uint32_t>(dest.m_ptr), destptr);
        m_assembler.movlMemReg(destptr, destval);
        if (cond == Overflow) {
            m_assembler.addvlRegReg(scratchReg3, destval);
            result = true;
        } else {
            m_assembler.addlRegReg(scratchReg3, destval);
            if (cond == Signed) {
                m_assembler.cmppz(destval);
                result = false;
            } else if (cond == PositiveOrZero) {
                m_assembler.cmppz(destval);
                result = true;
            } else {
                m_assembler.movImm8(0, scratchReg3);
                m_assembler.cmplRegReg(scratchReg3, destval, SH4Condition(cond));
                result = (cond == Zero);
            }
        }
        m_assembler.movlRegMem(destval, destptr);
        releaseScratch(destval);
        releaseScratch(destptr);
        return result ? branchTrue() : branchFalse();
    }

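    // branchMul32 with Overflow uses the widening dmuls.l and compares MACH with the sign
    // extension of the low 32 bits in MACL: any mismatch means the signed product does not
    // fit in 32 bits. The remaining conditions use a plain 32-bit multiply.
    // Illustrative sketch only (assumes "masm" as above and that SH4Registers::r1 is defined):
    //   Jump mulOverflow = masm.branchMul32(MacroAssemblerSH4::Overflow, SH4Registers::r1, SH4Registers::r0);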
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        if (cond == Overflow) {
            RegisterID scrsign = claimScratch();
            RegisterID msbres = claimScratch();
            m_assembler.dmulslRegReg(src, dest);
            m_assembler.stsmacl(dest);
            m_assembler.cmppz(dest);
            m_assembler.movt(scrsign);
            m_assembler.addlImm8r(-1, scrsign);
            m_assembler.stsmach(msbres);
            m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal));
            releaseScratch(msbres);
            releaseScratch(scrsign);
            return branchFalse();
        }

        m_assembler.imullRegReg(src, dest);
        m_assembler.stsmacl(dest);
        if (cond == Signed) {
            // Check if dest is negative
            m_assembler.cmppz(dest);
            return branchFalse();
        }

        compare32(0, dest, static_cast<RelationalCondition>(cond));

        if (cond == NonZero) // NotEqual
            return branchFalse();
        return branchTrue();
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        move(imm, scratchReg3);
        if (src != dest)
            move(src, dest);

        return branchMul32(cond, scratchReg3, dest);
    }

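    // branchSub32 mirrors branchAdd32: Overflow uses subv (T set on signed overflow of the
    // subtraction), Signed branches when the result is negative, and Zero / NonZero compare
    // the result against zero. Immediate and multi-operand forms stage values in scratchReg3.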
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        if (cond == Overflow) {
            m_assembler.subvlRegReg(src, dest);
            return branchTrue();
        }

        if (cond == Signed) {
            // Check if dest is negative
            m_assembler.sublRegReg(src, dest);
            compare32(0, dest, LessThan);
            return branchTrue();
        }

        sub32(src, dest);
        compare32(0, dest, static_cast<RelationalCondition>(cond));

        if (cond == NonZero) // NotEqual
            return branchFalse();
        return branchTrue();
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));

        move(imm, scratchReg3);
        return branchSub32(cond, scratchReg3, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(imm, scratchReg3);
        if (src != dest)
            move(src, dest);
        return branchSub32(cond, scratchReg3, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 != dest)
            move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));

        if (cond == Signed) {
            or32(src, dest);
            compare32(0, dest, LessThan);
            return branchTrue();
        }

        or32(src, dest);
        compare32(0, dest, static_cast<RelationalCondition>(cond));

        if (cond == NonZero) // NotEqual
            return branchFalse();
        return branchTrue();
    }

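    // branchConvertDoubleToInt32 truncates src to an integer through FPUL, converts the
    // result back to double and appends a failure jump when the round trip does not compare
    // equal (fractional, NaN or out-of-range input). With negZeroCheck, a zero result is
    // also reported as a failure so the caller can handle -0.0 separately.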
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        m_assembler.ftrcdrmfpul(src);
        m_assembler.stsfpulReg(dest);
        convertInt32ToDouble(dest, fscratch);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fscratch, src));

        if (negZeroCheck) {
            if (dest == SH4Registers::r0)
                m_assembler.cmpEqImmR0(0, dest);
            else {
                m_assembler.movImm8(0, scratchReg3);
                m_assembler.cmplRegReg(scratchReg3, dest, SH4Condition(Equal));
            }
            failureCases.append(branchTrue());
        }
    }

    void neg32(RegisterID dst)
    {
        m_assembler.neg(dst, dst);
    }

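    // Logical right shifts: the shift amount is masked to five bits and negated so that
    // shld (left shift for positive counts, logical right shift for negative ones) does the
    // work; the constant shifts 1, 2, 8 and 16 use the dedicated shlr forms instead.
    // Illustrative sketch only (assumes "masm" as above):
    //   masm.urshift32(TrustedImm32(3), SH4Registers::r0); // r0 = r0 >> 3 (unsigned)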
    void urshift32(RegisterID shiftamount, RegisterID dest)
    {
        RegisterID shiftTmp = claimScratch();
        m_assembler.loadConstant(0x1f, shiftTmp);
        m_assembler.andlRegReg(shiftamount, shiftTmp);
        m_assembler.neg(shiftTmp, shiftTmp);
        m_assembler.shldRegReg(dest, shiftTmp);
        releaseScratch(shiftTmp);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        int immMasked = imm.m_value & 0x1f;
        if (!immMasked)
            return;

        if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) {
            m_assembler.shlrImm8r(immMasked, dest);
            return;
        }

        RegisterID shiftTmp = claimScratch();
        m_assembler.loadConstant(-immMasked, shiftTmp);
        m_assembler.shldRegReg(dest, shiftTmp);
        releaseScratch(shiftTmp);
    }

    void urshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);

        urshift32(shiftamount, dest);
    }

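    // Calls: call() and nearCall() emit linkable call sequences whose targets are filled in
    // later (see linkCall() below), call(RegisterID) calls through a register, and
    // call(Address, RegisterID) loads the target from memory before issuing jsr.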
    Call call()
    {
        return Call(m_assembler.call(), Call::Linkable);
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address, RegisterID target)
    {
        load32(address.base, address.offset, target);
        m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2);
        m_assembler.branch(JSR_OPCODE, target);
        m_assembler.nop();
    }

    void breakpoint()
    {
        m_assembler.bkpt();
        m_assembler.nop();
    }

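    // Patchable pointer compares: branchPtrWithPatch materialises initialRightValue with
    // moveWithPatch (the returned DataLabelPtr marks the constant so it can be repatched
    // later) and then compares and branches like branch32.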
    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        RegisterID dataTempRegister = claimScratch();

        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond));
        releaseScratch(dataTempRegister);

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }

    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        RegisterID scr = claimScratch();

        m_assembler.loadConstant(left.offset, scr);
        m_assembler.addlRegReg(left.base, scr);
        m_assembler.movlMemReg(scr, scr);
        RegisterID scr1 = claimScratch();
        dataLabel = moveWithPatch(initialRightValue, scr1);
        m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond));
        releaseScratch(scr);
        releaseScratch(scr1);

        if (cond == NotEqual)
            return branchFalse();
        return branchTrue();
    }

    void ret()
    {
        m_assembler.ret();
        m_assembler.nop();
    }

    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        RegisterID scr = claimScratch();
        DataLabelPtr label = moveWithPatch(initialValue, scr);
        store32(scr, address);
        releaseScratch(scr);
        return label;
    }

    DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); }

    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }

    Call tailRecursiveCall()
    {
        RegisterID scr = claimScratch();

        m_assembler.loadConstantUnReusable(0x0, scr, true);
        Jump tailJump = Jump(m_assembler.jmp(scr));
        releaseScratch(scr);

        return Call::fromTailJump(tailJump);
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }

    void nop()
    {
        m_assembler.nop();
    }

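    // Static patching hooks used when already-generated code is rewritten in place:
    // readCallTarget recovers the destination of a call, replaceWithJump overwrites an
    // instruction sequence with a jump (bounded by maxJumpReplacementSize()), and
    // revertJumpReplacementToBranchPtrWithPatch undoes that rewrite. The address-based
    // variants are unsupported here, as canJumpReplacePatchableBranchPtrWithPatch() reports.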
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation())));
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        SH4Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return SH4Assembler::maxJumpReplacementSize();
    }

    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }

    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }

    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
    {
        SH4Assembler::revertJump(instructionStart.dataLocation(), initialValue);
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

protected:
    SH4Assembler::Condition SH4Condition(RelationalCondition cond)
    {
        return static_cast<SH4Assembler::Condition>(cond);
    }

    SH4Assembler::Condition SH4Condition(ResultCondition cond)
    {
        return static_cast<SH4Assembler::Condition>(cond);
    }

private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        SH4Assembler::linkCall(code, call.m_label, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(SH4)

#endif // MacroAssemblerSH4_h
