//===------------------------ UnwindRegistersSave.S -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"
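
// Each implementation below provides
//   extern int __unw_getcontext(unw_context_t* thread_state);
// which snapshots the caller's register state into *thread_state and returns
// UNW_ESUCCESS. A minimal, hypothetical C caller (illustration only):
//   unw_context_t ctx;
//   __unw_getcontext(&ctx); // ctx now describes the registers at this call site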

    .text

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#   +                       +
#   +-----------------------+
#   + thread_state pointer  +
#   +-----------------------+
#   + return address        +
#   +-----------------------+   <-- SP
#   +                       +
#
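# %eax is pushed first so it can serve as the context pointer; its original
# value is recovered from the stack below and stored into the context before
# returning.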
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  push  %eax
  movl  8(%esp), %eax
  movl  %ebx,  4(%eax)
  movl  %ecx,  8(%eax)
  movl  %edx, 12(%eax)
  movl  %edi, 16(%eax)
  movl  %esi, 20(%eax)
  movl  %ebp, 24(%eax)
  movl  %esp, %edx
  addl  $8, %edx
  movl  %edx, 28(%eax)  # store what sp was at call site as esp
  # skip ss
  # skip eflags
  movl  4(%esp), %edx
  movl  %edx, 40(%eax)  # store return address as eip
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
  movl  (%esp), %edx
  movl  %edx, (%eax)  # store original eax
  popl  %eax
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__x86_64__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in rdi (rcx on Windows)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if defined(_WIN64)
#define PTR %rcx
#define TMP %rdx
#else
#define PTR %rdi
#define TMP %rsi
#endif
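
# PTR holds the context argument and TMP is a volatile scratch register,
# chosen per the host calling convention (SysV: rdi/rsi; Win64: rcx/rdx).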

  movq  %rax,   (PTR)
  movq  %rbx,  8(PTR)
  movq  %rcx, 16(PTR)
  movq  %rdx, 24(PTR)
  movq  %rdi, 32(PTR)
  movq  %rsi, 40(PTR)
  movq  %rbp, 48(PTR)
  movq  %rsp, 56(PTR)
  addq  $8,   56(PTR) # store what sp was at call site as rsp
  movq  %r8,  64(PTR)
  movq  %r9,  72(PTR)
  movq  %r10, 80(PTR)
  movq  %r11, 88(PTR)
  movq  %r12, 96(PTR)
  movq  %r13,104(PTR)
  movq  %r14,112(PTR)
  movq  %r15,120(PTR)
  movq  (%rsp),TMP
  movq  TMP,128(PTR) # store return address as rip
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

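# Win64 contexts also record the xmm registers (xmm6-xmm15 are non-volatile
# in the Windows x64 calling convention).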
#if defined(_WIN64)
  movdqu %xmm0,176(PTR)
  movdqu %xmm1,192(PTR)
  movdqu %xmm2,208(PTR)
  movdqu %xmm3,224(PTR)
  movdqu %xmm4,240(PTR)
  movdqu %xmm5,256(PTR)
  movdqu %xmm6,272(PTR)
  movdqu %xmm7,288(PTR)
  movdqu %xmm8,304(PTR)
  movdqu %xmm9,320(PTR)
  movdqu %xmm10,336(PTR)
  movdqu %xmm11,352(PTR)
  movdqu %xmm12,368(PTR)
  movdqu %xmm13,384(PTR)
  movdqu %xmm14,400(PTR)
  movdqu %xmm15,416(PTR)
#endif
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
  sw    $1, (4 * 1)($4)
  sw    $2, (4 * 2)($4)
  sw    $3, (4 * 3)($4)
  sw    $4, (4 * 4)($4)
  sw    $5, (4 * 5)($4)
  sw    $6, (4 * 6)($4)
  sw    $7, (4 * 7)($4)
  sw    $8, (4 * 8)($4)
  sw    $9, (4 * 9)($4)
  sw    $10, (4 * 10)($4)
  sw    $11, (4 * 11)($4)
  sw    $12, (4 * 12)($4)
  sw    $13, (4 * 13)($4)
  sw    $14, (4 * 14)($4)
  sw    $15, (4 * 15)($4)
  sw    $16, (4 * 16)($4)
  sw    $17, (4 * 17)($4)
  sw    $18, (4 * 18)($4)
  sw    $19, (4 * 19)($4)
  sw    $20, (4 * 20)($4)
  sw    $21, (4 * 21)($4)
  sw    $22, (4 * 22)($4)
  sw    $23, (4 * 23)($4)
  sw    $24, (4 * 24)($4)
  sw    $25, (4 * 25)($4)
  sw    $26, (4 * 26)($4)
  sw    $27, (4 * 27)($4)
  sw    $28, (4 * 28)($4)
  sw    $29, (4 * 29)($4)
  sw    $30, (4 * 30)($4)
  sw    $31, (4 * 31)($4)
  # Store return address to pc
  sw    $31, (4 * 32)($4)
  # hi and lo
  mfhi  $8
  sw    $8,  (4 * 33)($4)
  mflo  $8
  sw    $8,  (4 * 34)($4)
#ifdef __mips_hard_float
#if __mips_fpr != 64
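  # With 32-bit FP registers (FR=0), an even/odd register pair holds one
  # double, so storing the even-numbered registers with sdc1 captures the
  # entire FP register file.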
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f1, (4 * 36 + 8 * 1)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f3, (4 * 36 + 8 * 3)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f5, (4 * 36 + 8 * 5)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f7, (4 * 36 + 8 * 7)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f9, (4 * 36 + 8 * 9)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f11, (4 * 36 + 8 * 11)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f13, (4 * 36 + 8 * 13)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f15, (4 * 36 + 8 * 15)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f17, (4 * 36 + 8 * 17)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f19, (4 * 36 + 8 * 19)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f21, (4 * 36 + 8 * 21)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f23, (4 * 36 + 8 * 23)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f25, (4 * 36 + 8 * 25)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f27, (4 * 36 + 8 * 27)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f29, (4 * 36 + 8 * 29)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
  sdc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  jr    $31
  # return UNW_ESUCCESS (in the branch delay slot)
  or    $2, $0, $0
  .set pop

#elif defined(__mips64)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
  sd    $1, (8 * 1)($4)
  sd    $2, (8 * 2)($4)
  sd    $3, (8 * 3)($4)
  sd    $4, (8 * 4)($4)
  sd    $5, (8 * 5)($4)
  sd    $6, (8 * 6)($4)
  sd    $7, (8 * 7)($4)
  sd    $8, (8 * 8)($4)
  sd    $9, (8 * 9)($4)
  sd    $10, (8 * 10)($4)
  sd    $11, (8 * 11)($4)
  sd    $12, (8 * 12)($4)
  sd    $13, (8 * 13)($4)
  sd    $14, (8 * 14)($4)
  sd    $15, (8 * 15)($4)
  sd    $16, (8 * 16)($4)
  sd    $17, (8 * 17)($4)
  sd    $18, (8 * 18)($4)
  sd    $19, (8 * 19)($4)
  sd    $20, (8 * 20)($4)
  sd    $21, (8 * 21)($4)
  sd    $22, (8 * 22)($4)
  sd    $23, (8 * 23)($4)
  sd    $24, (8 * 24)($4)
  sd    $25, (8 * 25)($4)
  sd    $26, (8 * 26)($4)
  sd    $27, (8 * 27)($4)
  sd    $28, (8 * 28)($4)
  sd    $29, (8 * 29)($4)
  sd    $30, (8 * 30)($4)
  sd    $31, (8 * 31)($4)
  # Store return address to pc
  sd    $31, (8 * 32)($4)
  # hi and lo
  mfhi  $8
  sd    $8,  (8 * 33)($4)
  mflo  $8
  sd    $8,  (8 * 34)($4)
#ifdef __mips_hard_float
  sdc1  $f0, (8 * 35)($4)
  sdc1  $f1, (8 * 36)($4)
  sdc1  $f2, (8 * 37)($4)
  sdc1  $f3, (8 * 38)($4)
  sdc1  $f4, (8 * 39)($4)
  sdc1  $f5, (8 * 40)($4)
  sdc1  $f6, (8 * 41)($4)
  sdc1  $f7, (8 * 42)($4)
  sdc1  $f8, (8 * 43)($4)
  sdc1  $f9, (8 * 44)($4)
  sdc1  $f10, (8 * 45)($4)
  sdc1  $f11, (8 * 46)($4)
  sdc1  $f12, (8 * 47)($4)
  sdc1  $f13, (8 * 48)($4)
  sdc1  $f14, (8 * 49)($4)
  sdc1  $f15, (8 * 50)($4)
  sdc1  $f16, (8 * 51)($4)
  sdc1  $f17, (8 * 52)($4)
  sdc1  $f18, (8 * 53)($4)
  sdc1  $f19, (8 * 54)($4)
  sdc1  $f20, (8 * 55)($4)
  sdc1  $f21, (8 * 56)($4)
  sdc1  $f22, (8 * 57)($4)
  sdc1  $f23, (8 * 58)($4)
  sdc1  $f24, (8 * 59)($4)
  sdc1  $f25, (8 * 60)($4)
  sdc1  $f26, (8 * 61)($4)
  sdc1  $f27, (8 * 62)($4)
  sdc1  $f28, (8 * 63)($4)
  sdc1  $f29, (8 * 64)($4)
  sdc1  $f30, (8 * 65)($4)
  sdc1  $f31, (8 * 66)($4)
#endif
  jr    $31
  # return UNW_ESUCCESS (in the branch delay slot)
  or    $2, $0, $0
  .set pop

#elif defined(__mips__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# Just trap for the time being.
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  teq $0, $0      # $0 == $0, so this always traps

#elif defined(__powerpc64__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

// store register (GPR)
#define PPC64_STR(n) \
  std   %r##n, (8 * (n + 2))(%r3)
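// GPR n lands at offset 8 * (n + 2): the first two 8-byte slots of the
// context are reserved for srr0/srr1 (see PPC64_OFFS_SRR0 below).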

  // save GPRs
  PPC64_STR(0)
  mflr  %r0
  std   %r0, PPC64_OFFS_SRR0(%r3) // store lr as srr0
  PPC64_STR(1)
  PPC64_STR(2)
  PPC64_STR(3)
  PPC64_STR(4)
  PPC64_STR(5)
  PPC64_STR(6)
  PPC64_STR(7)
  PPC64_STR(8)
  PPC64_STR(9)
  PPC64_STR(10)
  PPC64_STR(11)
  PPC64_STR(12)
  PPC64_STR(13)
  PPC64_STR(14)
  PPC64_STR(15)
  PPC64_STR(16)
  PPC64_STR(17)
  PPC64_STR(18)
  PPC64_STR(19)
  PPC64_STR(20)
  PPC64_STR(21)
  PPC64_STR(22)
  PPC64_STR(23)
  PPC64_STR(24)
  PPC64_STR(25)
  PPC64_STR(26)
  PPC64_STR(27)
  PPC64_STR(28)
  PPC64_STR(29)
  PPC64_STR(30)
  PPC64_STR(31)

  mfcr  %r0
  std   %r0,  PPC64_OFFS_CR(%r3)
  mfxer %r0
  std   %r0,  PPC64_OFFS_XER(%r3)
  mflr  %r0
  std   %r0,  PPC64_OFFS_LR(%r3)
  mfctr %r0
  std   %r0,  PPC64_OFFS_CTR(%r3)
  mfvrsave    %r0
  std   %r0,  PPC64_OFFS_VRSAVE(%r3)

#ifdef PPC64_HAS_VMX
  // save VS registers
  // (note that this also saves floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  %r4, %r3, PPC64_OFFS_FP

// store VS register
#define PPC64_STVS(n)      \
  stxvd2x %vs##n, 0, %r4  ;\
  addi    %r4, %r4, 16
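// stxvd2x stores 16 bytes at the address in r4, which is then advanced, so
// VS0..VS63 end up contiguous in the context starting at PPC64_OFFS_FP.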

  PPC64_STVS(0)
  PPC64_STVS(1)
  PPC64_STVS(2)
  PPC64_STVS(3)
  PPC64_STVS(4)
  PPC64_STVS(5)
  PPC64_STVS(6)
  PPC64_STVS(7)
  PPC64_STVS(8)
  PPC64_STVS(9)
  PPC64_STVS(10)
  PPC64_STVS(11)
  PPC64_STVS(12)
  PPC64_STVS(13)
  PPC64_STVS(14)
  PPC64_STVS(15)
  PPC64_STVS(16)
  PPC64_STVS(17)
  PPC64_STVS(18)
  PPC64_STVS(19)
  PPC64_STVS(20)
  PPC64_STVS(21)
  PPC64_STVS(22)
  PPC64_STVS(23)
  PPC64_STVS(24)
  PPC64_STVS(25)
  PPC64_STVS(26)
  PPC64_STVS(27)
  PPC64_STVS(28)
  PPC64_STVS(29)
  PPC64_STVS(30)
  PPC64_STVS(31)
  PPC64_STVS(32)
  PPC64_STVS(33)
  PPC64_STVS(34)
  PPC64_STVS(35)
  PPC64_STVS(36)
  PPC64_STVS(37)
  PPC64_STVS(38)
  PPC64_STVS(39)
  PPC64_STVS(40)
  PPC64_STVS(41)
  PPC64_STVS(42)
  PPC64_STVS(43)
  PPC64_STVS(44)
  PPC64_STVS(45)
  PPC64_STVS(46)
  PPC64_STVS(47)
  PPC64_STVS(48)
  PPC64_STVS(49)
  PPC64_STVS(50)
  PPC64_STVS(51)
  PPC64_STVS(52)
  PPC64_STVS(53)
  PPC64_STVS(54)
  PPC64_STVS(55)
  PPC64_STVS(56)
  PPC64_STVS(57)
  PPC64_STVS(58)
  PPC64_STVS(59)
  PPC64_STVS(60)
  PPC64_STVS(61)
  PPC64_STVS(62)
  PPC64_STVS(63)

#else

// store FP register
#define PPC64_STF(n) \
  stfd  %f##n, (PPC64_OFFS_FP + n * 16)(%r3)

  // save float registers
  PPC64_STF(0)
  PPC64_STF(1)
  PPC64_STF(2)
  PPC64_STF(3)
  PPC64_STF(4)
  PPC64_STF(5)
  PPC64_STF(6)
  PPC64_STF(7)
  PPC64_STF(8)
  PPC64_STF(9)
  PPC64_STF(10)
  PPC64_STF(11)
  PPC64_STF(12)
  PPC64_STF(13)
  PPC64_STF(14)
  PPC64_STF(15)
  PPC64_STF(16)
  PPC64_STF(17)
  PPC64_STF(18)
  PPC64_STF(19)
  PPC64_STF(20)
  PPC64_STF(21)
  PPC64_STF(22)
  PPC64_STF(23)
  PPC64_STF(24)
  PPC64_STF(25)
  PPC64_STF(26)
  PPC64_STF(27)
  PPC64_STF(28)
  PPC64_STF(29)
  PPC64_STF(30)
  PPC64_STF(31)

  // save vector registers

  // Use the 16 bytes below the stack pointer as an
  // aligned buffer to save each vector register.
  // Note that the stack pointer is always 16-byte aligned.
  subi  %r4, %r1, 16

#define PPC64_STV_UNALIGNED(n)                 \
  stvx  %v##n, 0, %r4                         ;\
  ld    %r5, 0(%r4)                           ;\
  std   %r5, (PPC64_OFFS_V + n * 16)(%r3)     ;\
  ld    %r5, 8(%r4)                           ;\
  std   %r5, (PPC64_OFFS_V + n * 16 + 8)(%r3)
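// stvx requires a 16-byte-aligned address, while the context fields may not
// be aligned, so each vector is staged in the aligned buffer and then copied
// into the context with two 64-bit stores.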

  PPC64_STV_UNALIGNED(0)
  PPC64_STV_UNALIGNED(1)
  PPC64_STV_UNALIGNED(2)
  PPC64_STV_UNALIGNED(3)
  PPC64_STV_UNALIGNED(4)
  PPC64_STV_UNALIGNED(5)
  PPC64_STV_UNALIGNED(6)
  PPC64_STV_UNALIGNED(7)
  PPC64_STV_UNALIGNED(8)
  PPC64_STV_UNALIGNED(9)
  PPC64_STV_UNALIGNED(10)
  PPC64_STV_UNALIGNED(11)
  PPC64_STV_UNALIGNED(12)
  PPC64_STV_UNALIGNED(13)
  PPC64_STV_UNALIGNED(14)
  PPC64_STV_UNALIGNED(15)
  PPC64_STV_UNALIGNED(16)
  PPC64_STV_UNALIGNED(17)
  PPC64_STV_UNALIGNED(18)
  PPC64_STV_UNALIGNED(19)
  PPC64_STV_UNALIGNED(20)
  PPC64_STV_UNALIGNED(21)
  PPC64_STV_UNALIGNED(22)
  PPC64_STV_UNALIGNED(23)
  PPC64_STV_UNALIGNED(24)
  PPC64_STV_UNALIGNED(25)
  PPC64_STV_UNALIGNED(26)
  PPC64_STV_UNALIGNED(27)
  PPC64_STV_UNALIGNED(28)
  PPC64_STV_UNALIGNED(29)
  PPC64_STV_UNALIGNED(30)
  PPC64_STV_UNALIGNED(31)

#endif

  li    %r3,  0   // return UNW_ESUCCESS
  blr


#elif defined(__ppc__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  stw     %r0,   8(%r3)
  mflr    %r0
  stw     %r0,   0(%r3) // store lr as srr0
  stw     %r1,  12(%r3)
  stw     %r2,  16(%r3)
  stw     %r3,  20(%r3)
  stw     %r4,  24(%r3)
  stw     %r5,  28(%r3)
  stw     %r6,  32(%r3)
  stw     %r7,  36(%r3)
  stw     %r8,  40(%r3)
  stw     %r9,  44(%r3)
  stw     %r10, 48(%r3)
  stw     %r11, 52(%r3)
  stw     %r12, 56(%r3)
  stw     %r13, 60(%r3)
  stw     %r14, 64(%r3)
  stw     %r15, 68(%r3)
  stw     %r16, 72(%r3)
  stw     %r17, 76(%r3)
  stw     %r18, 80(%r3)
  stw     %r19, 84(%r3)
  stw     %r20, 88(%r3)
  stw     %r21, 92(%r3)
  stw     %r22, 96(%r3)
  stw     %r23,100(%r3)
  stw     %r24,104(%r3)
  stw     %r25,108(%r3)
  stw     %r26,112(%r3)
  stw     %r27,116(%r3)
  stw     %r28,120(%r3)
  stw     %r29,124(%r3)
  stw     %r30,128(%r3)
  stw     %r31,132(%r3)

  // save VRSave register
  mfspr   %r0, 256
  stw     %r0, 156(%r3)
  // save CR registers
  mfcr    %r0
  stw     %r0, 136(%r3)
  // save CTR register
  mfctr   %r0
  stw     %r0, 148(%r3)

  // save float registers
  stfd    %f0, 160(%r3)
  stfd    %f1, 168(%r3)
  stfd    %f2, 176(%r3)
  stfd    %f3, 184(%r3)
  stfd    %f4, 192(%r3)
  stfd    %f5, 200(%r3)
  stfd    %f6, 208(%r3)
  stfd    %f7, 216(%r3)
  stfd    %f8, 224(%r3)
  stfd    %f9, 232(%r3)
  stfd    %f10,240(%r3)
  stfd    %f11,248(%r3)
  stfd    %f12,256(%r3)
  stfd    %f13,264(%r3)
  stfd    %f14,272(%r3)
  stfd    %f15,280(%r3)
  stfd    %f16,288(%r3)
  stfd    %f17,296(%r3)
  stfd    %f18,304(%r3)
  stfd    %f19,312(%r3)
  stfd    %f20,320(%r3)
  stfd    %f21,328(%r3)
  stfd    %f22,336(%r3)
  stfd    %f23,344(%r3)
  stfd    %f24,352(%r3)
  stfd    %f25,360(%r3)
  stfd    %f26,368(%r3)
  stfd    %f27,376(%r3)
  stfd    %f28,384(%r3)
  stfd    %f29,392(%r3)
  stfd    %f30,400(%r3)
  stfd    %f31,408(%r3)


  // save vector registers

  subi    %r4, %r1, 16
  rlwinm  %r4, %r4, 0, 0, 27  // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone

#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
  stvx    _vec, 0, %r4          SEPARATOR \
  lwz     %r5, 0(%r4)           SEPARATOR \
  stw     %r5, _offset(%r3)     SEPARATOR \
  lwz     %r5, 4(%r4)           SEPARATOR \
  stw     %r5, _offset+4(%r3)   SEPARATOR \
  lwz     %r5, 8(%r4)           SEPARATOR \
  stw     %r5, _offset+8(%r3)   SEPARATOR \
  lwz     %r5, 12(%r4)          SEPARATOR \
  stw     %r5, _offset+12(%r3)
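// As in the 64-bit path, stvx needs a 16-byte-aligned address, so each vector
// is staged in the red-zone buffer and copied out with four 32-bit stores.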

  SAVE_VECTOR_UNALIGNED( %v0, 424+0x000)
  SAVE_VECTOR_UNALIGNED( %v1, 424+0x010)
  SAVE_VECTOR_UNALIGNED( %v2, 424+0x020)
  SAVE_VECTOR_UNALIGNED( %v3, 424+0x030)
  SAVE_VECTOR_UNALIGNED( %v4, 424+0x040)
  SAVE_VECTOR_UNALIGNED( %v5, 424+0x050)
  SAVE_VECTOR_UNALIGNED( %v6, 424+0x060)
  SAVE_VECTOR_UNALIGNED( %v7, 424+0x070)
  SAVE_VECTOR_UNALIGNED( %v8, 424+0x080)
  SAVE_VECTOR_UNALIGNED( %v9, 424+0x090)
  SAVE_VECTOR_UNALIGNED(%v10, 424+0x0A0)
  SAVE_VECTOR_UNALIGNED(%v11, 424+0x0B0)
  SAVE_VECTOR_UNALIGNED(%v12, 424+0x0C0)
  SAVE_VECTOR_UNALIGNED(%v13, 424+0x0D0)
  SAVE_VECTOR_UNALIGNED(%v14, 424+0x0E0)
  SAVE_VECTOR_UNALIGNED(%v15, 424+0x0F0)
  SAVE_VECTOR_UNALIGNED(%v16, 424+0x100)
  SAVE_VECTOR_UNALIGNED(%v17, 424+0x110)
  SAVE_VECTOR_UNALIGNED(%v18, 424+0x120)
  SAVE_VECTOR_UNALIGNED(%v19, 424+0x130)
  SAVE_VECTOR_UNALIGNED(%v20, 424+0x140)
  SAVE_VECTOR_UNALIGNED(%v21, 424+0x150)
  SAVE_VECTOR_UNALIGNED(%v22, 424+0x160)
  SAVE_VECTOR_UNALIGNED(%v23, 424+0x170)
  SAVE_VECTOR_UNALIGNED(%v24, 424+0x180)
  SAVE_VECTOR_UNALIGNED(%v25, 424+0x190)
  SAVE_VECTOR_UNALIGNED(%v26, 424+0x1A0)
  SAVE_VECTOR_UNALIGNED(%v27, 424+0x1B0)
  SAVE_VECTOR_UNALIGNED(%v28, 424+0x1C0)
  SAVE_VECTOR_UNALIGNED(%v29, 424+0x1D0)
  SAVE_VECTOR_UNALIGNED(%v30, 424+0x1E0)
  SAVE_VECTOR_UNALIGNED(%v31, 424+0x1F0)

  li      %r3, 0  // return UNW_ESUCCESS
  blr


#elif defined(__arm64__) || defined(__aarch64__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in x0
//
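// Layout as written below: x0-x30 in 8-byte slots from offset 0, sp at
// 0x0F8, the return address recorded as pc at 0x100, then d0-d31 from 0x110.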
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  stp    x0, x1,  [x0, #0x000]
  stp    x2, x3,  [x0, #0x010]
  stp    x4, x5,  [x0, #0x020]
  stp    x6, x7,  [x0, #0x030]
  stp    x8, x9,  [x0, #0x040]
  stp    x10,x11, [x0, #0x050]
  stp    x12,x13, [x0, #0x060]
  stp    x14,x15, [x0, #0x070]
  stp    x16,x17, [x0, #0x080]
  stp    x18,x19, [x0, #0x090]
  stp    x20,x21, [x0, #0x0A0]
  stp    x22,x23, [x0, #0x0B0]
  stp    x24,x25, [x0, #0x0C0]
  stp    x26,x27, [x0, #0x0D0]
  stp    x28,x29, [x0, #0x0E0]
  str    x30,     [x0, #0x0F0]
  mov    x1,sp
  str    x1,      [x0, #0x0F8]
  str    x30,     [x0, #0x100]    // store return address as pc
  // skip cpsr
  stp    d0, d1,  [x0, #0x110]
  stp    d2, d3,  [x0, #0x120]
  stp    d4, d5,  [x0, #0x130]
  stp    d6, d7,  [x0, #0x140]
  stp    d8, d9,  [x0, #0x150]
  stp    d10,d11, [x0, #0x160]
  stp    d12,d13, [x0, #0x170]
  stp    d14,d15, [x0, #0x180]
  stp    d16,d17, [x0, #0x190]
  stp    d18,d19, [x0, #0x1A0]
  stp    d20,d21, [x0, #0x1B0]
  stp    d22,d23, [x0, #0x1C0]
  stp    d24,d25, [x0, #0x1D0]
  stp    d26,d27, [x0, #0x1E0]
  stp    d28,d29, [x0, #0x1F0]
  str    d30,     [x0, #0x200]
  str    d31,     [x0, #0x208]
  mov    x0, #0                   // return UNW_ESUCCESS
  ret

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ extern int __unw_getcontext(unw_context_t* thread_state)
@
@ On entry:
@  thread_state pointer is in r0
@
@ Per EHABI #4.7 this only saves the core integer registers.
@ EHABI #7.4.5 notes that in general all VRS registers should be restored;
@ however, this is very hard to do for VFP registers because the library
@ cannot know how many registers the target architecture implements.
@ Instead, VFP registers are saved on demand by logic external to
@ __unw_getcontext.
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
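  @ Thumb-1 stm can only encode the low registers r0-r7, so the high
  @ registers are copied through low scratch registers before being stored.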
  stm r0!, {r0-r7}
  mov r1, r8
  mov r2, r9
  mov r3, r10
  stm r0!, {r1-r3}
  mov r1, r11
  mov r2, sp
  mov r3, lr
  str r1, [r0, #0]   @ r11
  @ r12 does not need storing, it is the intra-procedure-call scratch register
  str r2, [r0, #8]   @ sp
  str r3, [r0, #12]  @ lr
  str r3, [r0, #16]  @ store return address as pc
  @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
  @ It is safe to use here though because we are about to return, and cpsr is
  @ not expected to be preserved.
  movs r0, #0        @ return UNW_ESUCCESS
#else
  @ 32-bit Thumb-2 restrictions for stm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) cannot be in the list in an STM instruction
  stm r0, {r0-r12}
  str sp, [r0, #52]
  str lr, [r0, #56]
  str lr, [r0, #60]  @ store return address as pc
  mov r0, #0         @ return UNW_ESUCCESS
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
  vstmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
  vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
  @ VFP and iwMMX instructions are only available when compiling with the
  @ flags that enable them. We do not want to build the whole library that
  @ way (the compiler could then emit such instructions anywhere), but this
  @ code is only reached if the personality routine actually needs these
  @ registers, and their use implies they are available on the target, so it
  @ is safe to execute. The .fpu directive above lets the assembler accept
  @ these instructions without enabling VFP for the rest of the build.
  vstmia r0, {d16-d31}
  JMP(lr)

#if defined(_LIBUNWIND_ARM_WMMX)

@
@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
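  @ As with VFP above, coprocessor mnemonics are used so this assembles
  @ without iWMMXt support enabled; each stcl below is the encoding of the
  @ wstrd shown in its comment.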
  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in r3
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  l.sw       0(r3), r0
  l.sw       4(r3), r1
  l.sw       8(r3), r2
  l.sw      12(r3), r3
  l.sw      16(r3), r4
  l.sw      20(r3), r5
  l.sw      24(r3), r6
  l.sw      28(r3), r7
  l.sw      32(r3), r8
  l.sw      36(r3), r9
  l.sw      40(r3), r10
  l.sw      44(r3), r11
  l.sw      48(r3), r12
  l.sw      52(r3), r13
  l.sw      56(r3), r14
  l.sw      60(r3), r15
  l.sw      64(r3), r16
  l.sw      68(r3), r17
  l.sw      72(r3), r18
  l.sw      76(r3), r19
  l.sw      80(r3), r20
  l.sw      84(r3), r21
  l.sw      88(r3), r22
  l.sw      92(r3), r23
  l.sw      96(r3), r24
  l.sw     100(r3), r25
  l.sw     104(r3), r26
  l.sw     108(r3), r27
  l.sw     112(r3), r28
  l.sw     116(r3), r29
  l.sw     120(r3), r30
  l.sw     124(r3), r31
  # store ra to pc
  l.sw     128(r3), r9
  # zero epcr
  l.sw     132(r3), r0
  # return UNW_ESUCCESS in r11 (the function would otherwise fall through);
  # l.nop fills the branch delay slot
  l.ori    r11, r0, 0
  l.jr     r9
   l.nop

#elif defined(__sparc__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in o0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  ta 3
  add %o7, 8, %o7
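  # "ta 3" flushes the register windows to the stack; %o7 is advanced past
  # the call so the std of the %o6/%o7 pair below records sp and the resume
  # pc, and the final "jmp %o7" returns through the adjusted address.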
  std %g0, [%o0 +   0]
  std %g2, [%o0 +   8]
  std %g4, [%o0 +  16]
  std %g6, [%o0 +  24]
  std %o0, [%o0 +  32]
  std %o2, [%o0 +  40]
  std %o4, [%o0 +  48]
  std %o6, [%o0 +  56]
  std %l0, [%o0 +  64]
  std %l2, [%o0 +  72]
  std %l4, [%o0 +  80]
  std %l6, [%o0 +  88]
  std %i0, [%o0 +  96]
  std %i2, [%o0 + 104]
  std %i4, [%o0 + 112]
  std %i6, [%o0 + 120]
  jmp %o7
   clr %o0                   // return UNW_ESUCCESS
#endif

  WEAK_ALIAS(__unw_getcontext, unw_getcontext)

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE
