UnwindRegistersRestore.S revision 288149
//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if defined(__i386__)
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
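#
# The offsets below follow the Registers_x86 layout implied by the skip
# comments further down: 4-byte slots in the order eax, ebx, ecx, edx, edi,
# esi, ebp, esp, ss, eflags, eip, cs, ds, es, fs, gs.
# eax is needed as the base pointer until the end and eip cannot be written
# directly, so both values are staged on the target stack and recovered
# with pop and ret.
#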
  movl   4(%esp), %eax
  # stage eax and the return eip on the new stack
  movl  28(%eax), %edx # edx holds new stack pointer
  subl  $8,%edx
  movl  %edx, 28(%eax)
  movl  0(%eax), %ebx
  movl  %ebx, 0(%edx)
  movl  40(%eax), %ebx
  movl  %ebx, 4(%edx)
  # eax and eip now sit at the top of the new stack
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl  12(%eax), %edx
  movl  16(%eax), %edi
  movl  20(%eax), %esi
  movl  24(%eax), %ebp
  movl  28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  ret        # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
# On entry, thread_state pointer is in rdi

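# The 8-byte slots run rax, rbx, rcx, rdx, rdi, rsi, rbp, rsp, r8-r15, rip
# (as the offsets below imply). rdi is the base pointer for every load and
# rip cannot be written directly, so the target rdi and rip are staged on
# the new stack and recovered with pop and ret after the stack switch.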
  movq  56(%rdi), %rax # rax holds new stack pointer
  subq  $16, %rax
  movq  %rax, 56(%rdi)
  movq  32(%rdi), %rbx  # store new rdi on new stack
  movq  %rbx, 0(%rax)
  movq  128(%rdi), %rbx # store new rip on new stack
  movq  %rbx, 8(%rax)
  # restore all registers
  movq    0(%rdi), %rax
  movq    8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq  104(%rdi), %r13
  movq  112(%rdi), %r14
  movq  120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs
  movq  56(%rdi), %rsp  # cut back rsp to new location
  pop    %rdi      # rdi was saved here earlier
  ret            # rip was saved here


#elif defined(__ppc__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
;
; void libunwind::Registers_ppc::jumpto()
;
; On entry:
;  thread_state pointer is in r3
;

  ; restore integral registers
  ; skip r0 for now
  ; skip r1 for now
  lwz     r2, 16(r3)
  ; skip r3 for now
  ; skip r4 for now
  ; skip r5 for now
  lwz     r6, 32(r3)
  lwz     r7, 36(r3)
  lwz     r8, 40(r3)
  lwz     r9, 44(r3)
  lwz    r10, 48(r3)
  lwz    r11, 52(r3)
  lwz    r12, 56(r3)
  lwz    r13, 60(r3)
  lwz    r14, 64(r3)
  lwz    r15, 68(r3)
  lwz    r16, 72(r3)
  lwz    r17, 76(r3)
  lwz    r18, 80(r3)
  lwz    r19, 84(r3)
  lwz    r20, 88(r3)
  lwz    r21, 92(r3)
  lwz    r22, 96(r3)
  lwz    r23,100(r3)
  lwz    r24,104(r3)
  lwz    r25,108(r3)
  lwz    r26,112(r3)
  lwz    r27,116(r3)
  lwz    r28,120(r3)
  lwz    r29,124(r3)
  lwz    r30,128(r3)
  lwz    r31,132(r3)

  ; restore float registers
  lfd    f0, 160(r3)
  lfd    f1, 168(r3)
  lfd    f2, 176(r3)
  lfd    f3, 184(r3)
  lfd    f4, 192(r3)
  lfd    f5, 200(r3)
  lfd    f6, 208(r3)
  lfd    f7, 216(r3)
  lfd    f8, 224(r3)
  lfd    f9, 232(r3)
  lfd    f10,240(r3)
  lfd    f11,248(r3)
  lfd    f12,256(r3)
  lfd    f13,264(r3)
  lfd    f14,272(r3)
  lfd    f15,280(r3)
  lfd    f16,288(r3)
  lfd    f17,296(r3)
  lfd    f18,304(r3)
  lfd    f19,312(r3)
  lfd    f20,320(r3)
  lfd    f21,328(r3)
  lfd    f22,336(r3)
  lfd    f23,344(r3)
  lfd    f24,352(r3)
  lfd    f25,360(r3)
  lfd    f26,368(r3)
  lfd    f27,376(r3)
  lfd    f28,384(r3)
  lfd    f29,392(r3)
  lfd    f30,400(r3)
  lfd    f31,408(r3)

  ; restore vector registers if any are in use
  lwz    r5,156(r3)  ; test VRsave
  cmpwi  r5,0
  beq    Lnovec

  subi  r4,r1,16
  rlwinm  r4,r4,0,0,27  ; clear the low 4 bits for 16-byte alignment
  ; r4 is now a 16-byte aligned pointer into the red zone
  ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
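  ; (the red zone is the area just below the stack pointer that the Darwin
  ; PPC ABI reserves as scratch space, so the bounce buffer needs no frame)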


#define LOAD_VECTOR_UNALIGNEDl(_index) \
  andis.  r0,r5,(1<<(15-_index))  @\
  beq    Ldone  ## _index     @\
  lwz    r0, 424+_index*16(r3)  @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3) @\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
Ldone  ## _index:

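; VRsave maps bit 0 (the MSB) to v0 and bit 31 (the LSB) to v31, so the
; "l" macro above tests v0-v15 through the upper halfword (andis.) while
; the "h" macro below tests v16-v31 through the lower halfword (andi.).
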
#define LOAD_VECTOR_UNALIGNEDh(_index) \
  andi.  r0,r5,(1<<(31-_index))  @\
  beq    Ldone  ## _index    @\
  lwz    r0, 424+_index*16(r3)  @\
  stw    r0, 0(r4)        @\
  lwz    r0, 424+_index*16+4(r3)  @\
  stw    r0, 4(r4)        @\
  lwz    r0, 424+_index*16+8(r3)  @\
  stw    r0, 8(r4)        @\
  lwz    r0, 424+_index*16+12(r3) @\
  stw    r0, 12(r4)        @\
  lvx    v ## _index,0,r4    @\
Ldone  ## _index:


  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)

Lnovec:
  lwz    r0, 136(r3) ; __cr
  mtcrf  255,r0
  lwz    r0, 148(r3) ; __ctr
  mtctr  r0
  lwz    r0, 144(r3) ; __lr
  mtlr   r0
  lwz    r0, 0(r3)   ; __ssr0 holds the pc; loading it into ctr clobbers
  mtctr  r0          ; the value restored above, but bctr does the final jump
  lwz    r0, 8(r3)  ; do r0 now
  lwz    r5,28(r3)  ; do r5 now
  lwz    r4,24(r3)  ; do r4 now
  lwz    r1,12(r3)  ; do sp now
  lwz    r3,20(r3)  ; do r3 last
  bctr

#elif defined(__arm64__) || defined(__aarch64__)

//
// void libunwind::Registers_arm64::jumpto()
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
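  // x0 is the base pointer for every load below and is itself part of the
  // saved state, so x0 and x1 are reloaded as the very last step before ret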
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  ldp    x16,x17, [x0, #0x080]
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,fp,  [x0, #0x0E0]
  ldr    lr,      [x0, #0x100]  // restore pc into lr
  ldr    x1,      [x0, #0x0F8]
  mov    sp,x1                  // restore sp

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  ret    lr                     // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
  .thumb
#endif
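@ (__ARM_ARCH_ISA_ARM is undefined on M-profile cores, which have no ARM
@ state, so everything below assembles as Thumb there)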

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM)
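  @ Thumb-1 ldm can only load r0-r7, so sp and the pc value (parked in lr)
  @ are set through scratch registers before the low registers are clobbered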
  ldr r2, [r0, #52]
  ldr r3, [r0, #60]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
  .fpu vfpv3-d16
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPy)
  @ VFP and iwMMX instructions are only available when compiling with the
  @ flags that enable them. We do not want to compile the library that way
  @ (the compiler could then emit such instructions anywhere), but this code
  @ is only reached when the personality routine actually needs these
  @ registers, which implies they exist on the target, so it is safe to
  @ execute. The .fpu directive above makes the mnemonic available to the
  @ assembler without changing how the rest of the library is compiled.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
  .fpu vfpv3-d16
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPy)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
  .fpu vfpv3
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPy)
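  @ d16-d31 only exist in VFP implementations with 32 double registers,
  @ hence .fpu vfpv3 above rather than the -d16 variant used earlier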
  vldmia r0, {d16-d31}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPy)
#if (!defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_6SM__)) || defined(__ARM_WMMX)
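  @ as with the VFP helpers above, these are written as generic coprocessor
  @ loads so the file assembles without iwMMXt support; each ldcl is the
  @ encoding of the wldrd shown in its trailing comment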
  ldcl p1, cr0, [r0], #8   @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8   @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8   @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8   @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8   @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8   @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8   @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8   @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8   @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8   @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8  @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8  @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8  @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8  @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8  @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8  @ wldrd wR15, [r0], #8
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
#if (!defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_6SM__)) || defined(__ARM_WMMX)
  ldc2 p1, cr8, [r0], #4   @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4   @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4  @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4  @ wldrw wCGR3, [r0], #4
#endif
  JMP(lr)

#elif defined(__or1k__)

DEFINE_LIBUNWIND_PRIVATE_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

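  # on OpenRISC the saved r9 (the link register) doubles as the return pc,
  # so the final jump below goes through r9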
  # restore integral registers
  l.lwz     r0,  0(r3)
  l.lwz     r1,  4(r3)
  l.lwz     r2,  8(r3)
  # skip r3 for now
  l.lwz     r4, 16(r3)
  l.lwz     r5, 20(r3)
  l.lwz     r6, 24(r3)
  l.lwz     r7, 28(r3)
  l.lwz     r8, 32(r3)
  l.lwz     r9, 36(r3)
  l.lwz    r10, 40(r3)
  l.lwz    r11, 44(r3)
  l.lwz    r12, 48(r3)
  l.lwz    r13, 52(r3)
  l.lwz    r14, 56(r3)
  l.lwz    r15, 60(r3)
  l.lwz    r16, 64(r3)
  l.lwz    r17, 68(r3)
  l.lwz    r18, 72(r3)
  l.lwz    r19, 76(r3)
  l.lwz    r20, 80(r3)
  l.lwz    r21, 84(r3)
  l.lwz    r22, 88(r3)
  l.lwz    r23, 92(r3)
  l.lwz    r24, 96(r3)
  l.lwz    r25,100(r3)
  l.lwz    r26,104(r3)
  l.lwz    r27,108(r3)
  l.lwz    r28,112(r3)
  l.lwz    r29,116(r3)
  l.lwz    r30,120(r3)
  l.lwz    r31,124(r3)

  # at last, restore r3
  l.lwz    r3,  12(r3)

  # jump to pc, with the l.nop filling the branch delay slot
  l.jr     r9
   l.nop

#endif
