UnwindRegistersRestore.S revision 335370
//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if defined(__i386__)
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
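#
# Assumed Registers_x86 save-area layout, inferred from the offsets used
# below (it should match i386_thread_state in Registers.hpp):
#   0:eax  4:ebx  8:ecx  12:edx  16:edi  20:esi  24:ebp  28:esp
#   32:ss  36:eflags  40:eip  44:cs  48:ds  52:es  56:fs  60:gs
#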
  movl   4(%esp), %eax
  # stage eax and eip on the new stack location
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # eax and eip are now staged on what will become the new stack
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  ret          # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
# On entry, thread_state pointer is in rdi

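# Assumed Registers_x86_64 save-area layout, inferred from the offsets used
# below:
#   0:rax  8:rbx  16:rcx  24:rdx  32:rdi  40:rsi  48:rbp  56:rsp
#   64:r8  72:r9  80:r10  88:r11  96:r12  104:r13  112:r14  120:r15  128:rip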
  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  ret            # rip was saved here


#elif defined(__ppc__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
;
; void libunwind::Registers_ppc::jumpto()
;
; On entry:
;  thread_state pointer is in r3
;

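  ; Assumed Registers_ppc save-area layout, inferred from the offsets used
  ; below:
  ;   0:srr0 (pc)   8 + 4*n: r0..r31   136:cr   148:ctr   156:vrsave
  ;   160 + 8*n: f0..f31   424 + 16*n: v0..v31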
  ; restore integral registers
  ; skip r0 for now
  ; skip r1 for now
  lwz     r2, 16(r3)
  ; skip r3 for now
  ; skip r4 for now
  ; skip r5 for now
  lwz     r6, 32(r3)
  lwz     r7, 36(r3)
  lwz     r8, 40(r3)
  lwz     r9, 44(r3)
  lwz    r10, 48(r3)
  lwz    r11, 52(r3)
  lwz    r12, 56(r3)
  lwz    r13, 60(r3)
  lwz    r14, 64(r3)
  lwz    r15, 68(r3)
  lwz    r16, 72(r3)
  lwz    r17, 76(r3)
  lwz    r18, 80(r3)
  lwz    r19, 84(r3)
  lwz    r20, 88(r3)
  lwz    r21, 92(r3)
  lwz    r22, 96(r3)
  lwz    r23,100(r3)
  lwz    r24,104(r3)
  lwz    r25,108(r3)
  lwz    r26,112(r3)
  lwz    r27,116(r3)
  lwz    r28,120(r3)
  lwz    r29,124(r3)
  lwz    r30,128(r3)
  lwz    r31,132(r3)

  ; restore float registers
  lfd    f0, 160(r3)
  lfd    f1, 168(r3)
  lfd    f2, 176(r3)
  lfd    f3, 184(r3)
  lfd    f4, 192(r3)
  lfd    f5, 200(r3)
  lfd    f6, 208(r3)
  lfd    f7, 216(r3)
  lfd    f8, 224(r3)
  lfd    f9, 232(r3)
  lfd    f10,240(r3)
  lfd    f11,248(r3)
  lfd    f12,256(r3)
  lfd    f13,264(r3)
  lfd    f14,272(r3)
  lfd    f15,280(r3)
  lfd    f16,288(r3)
  lfd    f17,296(r3)
  lfd    f18,304(r3)
  lfd    f19,312(r3)
  lfd    f20,320(r3)
  lfd    f21,328(r3)
  lfd    f22,336(r3)
  lfd    f23,344(r3)
  lfd    f24,352(r3)
  lfd    f25,360(r3)
  lfd    f26,368(r3)
  lfd    f27,376(r3)
  lfd    f28,384(r3)
  lfd    f29,392(r3)
  lfd    f30,400(r3)
  lfd    f31,408(r3)

  ; restore vector registers if any are in use
  lwz    r5,156(r3)  ; test VRsave
  cmpwi  r5,0
  beq    Lnovec

  subi  r4,r1,16
  rlwinm  r4,r4,0,0,27  ; mask low 4 bits
  ; r4 is now a 16-byte aligned pointer into the red zone
  ; the _vectorRegisters may not be 16-byte aligned, so copy via a red zone temp buffer

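  ; VRsave is a 32-bit mask in which bit n (counting from the most significant
  ; bit) is set when vN is live. The 'l' macro below tests bits 0-15 via the
  ; upper halfword (andis.); the 'h' macro tests bits 16-31 via the lower
  ; halfword (andi.).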

#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis.  r0,r5,(1<<(15-_index))  @\
  beq    Ldone  ## _index     @\
  lwz    r0, 424+_index*16(r3)  @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3) @\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
Ldone  ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi.  r0,r5,(1<<(31-_index))  @\
  beq    Ldone  ## _index    @\
  lwz    r0, 424+_index*16(r3)  @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3) @\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
Ldone  ## _index:


  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)

Lnovec:
  lwz    r0, 136(r3) ; __cr
  mtocrf 255,r0
  lwz    r0, 148(r3) ; __ctr
  mtctr  r0
  lwz    r0, 0(r3)   ; __srr0 (pc)
  mtctr  r0
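  ; note: this second mtctr overwrites the __ctr value restored above;
  ; the bctr below needs CTR to hold the target pc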
  lwz    r0, 8(r3)   ; do r0 now
  lwz    r5, 28(r3)  ; do r5 now
  lwz    r4, 24(r3)  ; do r4 now
  lwz    r1, 12(r3)  ; do sp now
  lwz    r3, 20(r3)  ; do r3 last
  bctr

#elif defined(__arm64__) || defined(__aarch64__)

//
// void libunwind::Registers_arm64::jumpto()
//
// On entry:
//  thread_state pointer is in x0
//
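// Assumed Registers_arm64 save-area layout, inferred from the offsets used
// below: x0..x28 at 8*n, sp at 0x0F8, pc at 0x100, d0..d31 at 0x110 + 8*n.
// The saved x30 (lr) slot at 0x0F0 is not reloaded; the pc is placed in lr
// instead so that the final ret branches to it.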
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  ldp    x16,x17, [x0, #0x080]
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr
  ldr    x1,      [x0, #0x0F8]
  mov    sp,x1                  // restore sp

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM)
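  @ Thumb-1 ldm can only load r0-r7, so sp and lr (holding the target pc) are
  @ set up first via explicit loads; r8-r12 are not restored on these targets.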
  ldr r2, [r0, #52]
  ldr r3, [r0, #60]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32-bit Thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
  .fpu vfpv3-d16
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPy)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do
  @ not want the compiler to generate instructions that access those), but this
  @ code is only reached if the personality routine needs these registers. Use
  @ of these registers implies they are actually available on the target, so it
  @ is safe to execute them. The .fpu directive above lets the assembler accept
  @ the VFP mnemonic without enabling VFP for compiled code.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
  .fpu vfpv3-d16
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPy)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
  .fpu vfpv3
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPy)
  vldmia r0, {d16-d31}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPy)
#if (!defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_6SM__)) || defined(__ARM_WMMX)
  ldcl p1, cr0, [r0], #8   @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8   @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8   @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8   @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8   @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8   @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8   @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8   @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8   @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8   @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
#if (!defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_6SM__)) || defined(__ARM_WMMX)
  ldc2 p1, cr8, [r0], #4   @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4   @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
#endif
  JMP(lr)

#elif defined(__or1k__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

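  # Assumed layout: r0..r31 at 4*n. The final l.jr branches to r9 (the OR1K
  # link register), so the unwinder is assumed to have stored the target pc
  # in the r9 slot.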
  # restore integral registers
  l.lwz     r0,  0(r3)
  l.lwz     r1,  4(r3)
  l.lwz     r2,  8(r3)
  # skip r3 for now
  l.lwz     r4, 16(r3)
  l.lwz     r5, 20(r3)
  l.lwz     r6, 24(r3)
  l.lwz     r7, 28(r3)
  l.lwz     r8, 32(r3)
  l.lwz     r9, 36(r3)
  l.lwz    r10, 40(r3)
  l.lwz    r11, 44(r3)
  l.lwz    r12, 48(r3)
  l.lwz    r13, 52(r3)
  l.lwz    r14, 56(r3)
  l.lwz    r15, 60(r3)
  l.lwz    r16, 64(r3)
  l.lwz    r17, 68(r3)
  l.lwz    r18, 72(r3)
  l.lwz    r19, 76(r3)
  l.lwz    r20, 80(r3)
  l.lwz    r21, 84(r3)
  l.lwz    r22, 88(r3)
  l.lwz    r23, 92(r3)
  l.lwz    r24, 96(r3)
  l.lwz    r25,100(r3)
  l.lwz    r26,104(r3)
  l.lwz    r27,108(r3)
  l.lwz    r28,112(r3)
  l.lwz    r29,116(r3)
  l.lwz    r30,120(r3)
  l.lwz    r31,124(r3)

  # at last, restore r3
  l.lwz    r3,  12(r3)

  # jump to pc
  l.jr     r9
   l.nop

#elif defined(__riscv)

//
// void libunwind::Registers_riscv::jumpto()
//
// On entry:
//  thread_state pointer is in a0
//
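// Assumed layout: x0..x31 at 8*n, f0..f31 at 8*32 + 8*n. The final ret
// branches to ra (x1), so the unwinder is assumed to have stored the target
// pc in the x1 slot.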
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_riscv6jumptoEv)
#ifdef __riscv_float_abi_double
  fld    f0, (8 * 32 + 8 * 0)(a0)
  fld    f1, (8 * 32 + 8 * 1)(a0)
  fld    f2, (8 * 32 + 8 * 2)(a0)
  fld    f3, (8 * 32 + 8 * 3)(a0)
  fld    f4, (8 * 32 + 8 * 4)(a0)
  fld    f5, (8 * 32 + 8 * 5)(a0)
  fld    f6, (8 * 32 + 8 * 6)(a0)
  fld    f7, (8 * 32 + 8 * 7)(a0)
  fld    f8, (8 * 32 + 8 * 8)(a0)
  fld    f9, (8 * 32 + 8 * 9)(a0)
  fld    f10, (8 * 32 + 8 * 10)(a0)
  fld    f11, (8 * 32 + 8 * 11)(a0)
  fld    f12, (8 * 32 + 8 * 12)(a0)
  fld    f13, (8 * 32 + 8 * 13)(a0)
  fld    f14, (8 * 32 + 8 * 14)(a0)
  fld    f15, (8 * 32 + 8 * 15)(a0)
  fld    f16, (8 * 32 + 8 * 16)(a0)
  fld    f17, (8 * 32 + 8 * 17)(a0)
  fld    f18, (8 * 32 + 8 * 18)(a0)
  fld    f19, (8 * 32 + 8 * 19)(a0)
  fld    f20, (8 * 32 + 8 * 20)(a0)
  fld    f21, (8 * 32 + 8 * 21)(a0)
  fld    f22, (8 * 32 + 8 * 22)(a0)
  fld    f23, (8 * 32 + 8 * 23)(a0)
  fld    f24, (8 * 32 + 8 * 24)(a0)
  fld    f25, (8 * 32 + 8 * 25)(a0)
  fld    f26, (8 * 32 + 8 * 26)(a0)
  fld    f27, (8 * 32 + 8 * 27)(a0)
  fld    f28, (8 * 32 + 8 * 28)(a0)
  fld    f29, (8 * 32 + 8 * 29)(a0)
  fld    f30, (8 * 32 + 8 * 30)(a0)
  fld    f31, (8 * 32 + 8 * 31)(a0)
#endif

  // x0 is zero
  ld    x1, (8 * 1)(a0)
  ld    x2, (8 * 2)(a0)
  ld    x3, (8 * 3)(a0)
  ld    x4, (8 * 4)(a0)
  ld    x5, (8 * 5)(a0)
  ld    x6, (8 * 6)(a0)
  ld    x7, (8 * 7)(a0)
  ld    x8, (8 * 8)(a0)
  ld    x9, (8 * 9)(a0)
  // skip a0 for now
  ld    x11, (8 * 11)(a0)
  ld    x12, (8 * 12)(a0)
  ld    x13, (8 * 13)(a0)
  ld    x14, (8 * 14)(a0)
  ld    x15, (8 * 15)(a0)
  ld    x16, (8 * 16)(a0)
  ld    x17, (8 * 17)(a0)
  ld    x18, (8 * 18)(a0)
  ld    x19, (8 * 19)(a0)
  ld    x20, (8 * 20)(a0)
  ld    x21, (8 * 21)(a0)
  ld    x22, (8 * 22)(a0)
  ld    x23, (8 * 23)(a0)
  ld    x24, (8 * 24)(a0)
  ld    x25, (8 * 25)(a0)
  ld    x26, (8 * 26)(a0)
  ld    x27, (8 * 27)(a0)
  ld    x28, (8 * 28)(a0)
  ld    x29, (8 * 29)(a0)
  ld    x30, (8 * 30)(a0)
  ld    x31, (8 * 31)(a0)
  ld    x10, (8 * 10)(a0)   // restore a0

  ret                       // jump to ra

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
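// Assumed layout: r0..r31 at 4*n, pc at 4*32, hi at 4*33, lo at 4*34, and
// (hard-float) f0..f31 at 4*36 + 8*n.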
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr == 32
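  // FR=0 mode: odd-numbered FPRs alias the upper halves of the even-numbered
  // pairs, so ldc1 of the even registers restores all of them.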
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
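// Assumed layout: r0..r31 at 8*n, pc at 8*32, hi at 8*33, lo at 8*34, and
// (hard-float) f0..f31 at 8*(35 + n).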
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  ldc1  $f0, (8 * 35)($4)
  ldc1  $f1, (8 * 36)($4)
  ldc1  $f2, (8 * 37)($4)
  ldc1  $f3, (8 * 38)($4)
  ldc1  $f4, (8 * 39)($4)
  ldc1  $f5, (8 * 40)($4)
  ldc1  $f6, (8 * 41)($4)
  ldc1  $f7, (8 * 42)($4)
  ldc1  $f8, (8 * 43)($4)
  ldc1  $f9, (8 * 44)($4)
  ldc1  $f10, (8 * 45)($4)
  ldc1  $f11, (8 * 46)($4)
  ldc1  $f12, (8 * 47)($4)
  ldc1  $f13, (8 * 48)($4)
  ldc1  $f14, (8 * 49)($4)
  ldc1  $f15, (8 * 50)($4)
  ldc1  $f16, (8 * 51)($4)
  ldc1  $f17, (8 * 52)($4)
  ldc1  $f18, (8 * 53)($4)
  ldc1  $f19, (8 * 54)($4)
  ldc1  $f20, (8 * 55)($4)
  ldc1  $f21, (8 * 56)($4)
  ldc1  $f22, (8 * 57)($4)
  ldc1  $f23, (8 * 58)($4)
  ldc1  $f24, (8 * 59)($4)
  ldc1  $f25, (8 * 60)($4)
  ldc1  $f26, (8 * 61)($4)
  ldc1  $f27, (8 * 62)($4)
  ldc1  $f28, (8 * 63)($4)
  ldc1  $f29, (8 * 64)($4)
  ldc1  $f30, (8 * 65)($4)
  ldc1  $f31, (8 * 66)($4)
#endif
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  ld    $5, (8 * 5)($4)
  ld    $6, (8 * 6)($4)
  ld    $7, (8 * 7)($4)
  ld    $8, (8 * 8)($4)
  ld    $9, (8 * 9)($4)
  ld    $10, (8 * 10)($4)
  ld    $11, (8 * 11)($4)
  ld    $12, (8 * 12)($4)
  ld    $13, (8 * 13)($4)
  ld    $14, (8 * 14)($4)
  ld    $15, (8 * 15)($4)
  ld    $16, (8 * 16)($4)
  ld    $17, (8 * 17)($4)
  ld    $18, (8 * 18)($4)
  ld    $19, (8 * 19)($4)
  ld    $20, (8 * 20)($4)
  ld    $21, (8 * 21)($4)
  ld    $22, (8 * 22)($4)
  ld    $23, (8 * 23)($4)
  ld    $24, (8 * 24)($4)
  ld    $25, (8 * 25)($4)
  ld    $26, (8 * 26)($4)
  ld    $27, (8 * 27)($4)
  ld    $28, (8 * 28)($4)
  ld    $29, (8 * 29)($4)
  ld    $30, (8 * 30)($4)
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#endif

NO_EXEC_STACK_DIRECTIVE