//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

#define FROM_0_TO_15 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
#define FROM_16_TO_31 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31

#define FROM_0_TO_31 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
#define FROM_32_TO_63 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63

#if defined(_AIX)
    .toc
#else
    .text
#endif

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#   +                       +
#   +-----------------------+
#   + thread_state pointer  +
#   +-----------------------+
#   + return address        +
#   +-----------------------+   <-- SP
#   +                       +
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  _LIBUNWIND_CET_ENDBR
  push  %eax
  movl  8(%esp), %eax
  movl  %ebx,  4(%eax)
  movl  %ecx,  8(%eax)
  movl  %edx, 12(%eax)
  movl  %edi, 16(%eax)
  movl  %esi, 20(%eax)
  movl  %ebp, 24(%eax)
  movl  %esp, %edx
  addl  $8, %edx
  movl  %edx, 28(%eax)  # store what sp was at call site as esp
  # skip ss
  # skip eflags
  movl  4(%esp), %edx
  movl  %edx, 40(%eax)  # store return address as eip
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs
  movl  (%esp), %edx
  movl  %edx, (%eax)  # store original eax
  popl  %eax
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__x86_64__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in rdi
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if defined(_WIN64)
#define PTR %rcx
#define TMP %rdx
#else
#define PTR %rdi
#define TMP %rsi
#endif

  _LIBUNWIND_CET_ENDBR
  movq  %rax,   (PTR)
  movq  %rbx,  8(PTR)
  movq  %rcx, 16(PTR)
  movq  %rdx, 24(PTR)
  movq  %rdi, 32(PTR)
  movq  %rsi, 40(PTR)
  movq  %rbp, 48(PTR)
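  # store rsp, then adjust it past the return address so it matches the call-site value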
  movq  %rsp, 56(PTR)
  addq  $8,   56(PTR)
  movq  %r8,  64(PTR)
  movq  %r9,  72(PTR)
  movq  %r10, 80(PTR)
  movq  %r11, 88(PTR)
  movq  %r12, 96(PTR)
  movq  %r13,104(PTR)
  movq  %r14,112(PTR)
  movq  %r15,120(PTR)
  movq  (%rsp),TMP
  movq  TMP,128(PTR) # store return address as rip
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu %xmm0,176(PTR)
  movdqu %xmm1,192(PTR)
  movdqu %xmm2,208(PTR)
  movdqu %xmm3,224(PTR)
  movdqu %xmm4,240(PTR)
  movdqu %xmm5,256(PTR)
  movdqu %xmm6,272(PTR)
  movdqu %xmm7,288(PTR)
  movdqu %xmm8,304(PTR)
  movdqu %xmm9,320(PTR)
  movdqu %xmm10,336(PTR)
  movdqu %xmm11,352(PTR)
  movdqu %xmm12,368(PTR)
  movdqu %xmm13,384(PTR)
  movdqu %xmm14,400(PTR)
  movdqu %xmm15,416(PTR)
#endif
  xorl  %eax, %eax    # return UNW_ESUCCESS
  ret

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
  sw    $1, (4 * 1)($4)
  sw    $2, (4 * 2)($4)
  sw    $3, (4 * 3)($4)
  sw    $4, (4 * 4)($4)
  sw    $5, (4 * 5)($4)
  sw    $6, (4 * 6)($4)
  sw    $7, (4 * 7)($4)
  sw    $8, (4 * 8)($4)
  sw    $9, (4 * 9)($4)
  sw    $10, (4 * 10)($4)
  sw    $11, (4 * 11)($4)
  sw    $12, (4 * 12)($4)
  sw    $13, (4 * 13)($4)
  sw    $14, (4 * 14)($4)
  sw    $15, (4 * 15)($4)
  sw    $16, (4 * 16)($4)
  sw    $17, (4 * 17)($4)
  sw    $18, (4 * 18)($4)
  sw    $19, (4 * 19)($4)
  sw    $20, (4 * 20)($4)
  sw    $21, (4 * 21)($4)
  sw    $22, (4 * 22)($4)
  sw    $23, (4 * 23)($4)
  sw    $24, (4 * 24)($4)
  sw    $25, (4 * 25)($4)
  sw    $26, (4 * 26)($4)
  sw    $27, (4 * 27)($4)
  sw    $28, (4 * 28)($4)
  sw    $29, (4 * 29)($4)
  sw    $30, (4 * 30)($4)
  sw    $31, (4 * 31)($4)
  # Store return address to pc
  sw    $31, (4 * 32)($4)
#if __mips_isa_rev < 6
  # hi and lo
  mfhi  $8
  sw    $8,  (4 * 33)($4)
  mflo  $8
  sw    $8,  (4 * 34)($4)
#endif
#ifdef __mips_hard_float
#if __mips_fpr != 64
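  # With 32-bit FPRs, double-precision values occupy even/odd register pairs,
  # so only the even-numbered registers need to be saved here.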
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  sdc1  $f0, (4 * 36 + 8 * 0)($4)
  sdc1  $f1, (4 * 36 + 8 * 1)($4)
  sdc1  $f2, (4 * 36 + 8 * 2)($4)
  sdc1  $f3, (4 * 36 + 8 * 3)($4)
  sdc1  $f4, (4 * 36 + 8 * 4)($4)
  sdc1  $f5, (4 * 36 + 8 * 5)($4)
  sdc1  $f6, (4 * 36 + 8 * 6)($4)
  sdc1  $f7, (4 * 36 + 8 * 7)($4)
  sdc1  $f8, (4 * 36 + 8 * 8)($4)
  sdc1  $f9, (4 * 36 + 8 * 9)($4)
  sdc1  $f10, (4 * 36 + 8 * 10)($4)
  sdc1  $f11, (4 * 36 + 8 * 11)($4)
  sdc1  $f12, (4 * 36 + 8 * 12)($4)
  sdc1  $f13, (4 * 36 + 8 * 13)($4)
  sdc1  $f14, (4 * 36 + 8 * 14)($4)
  sdc1  $f15, (4 * 36 + 8 * 15)($4)
  sdc1  $f16, (4 * 36 + 8 * 16)($4)
  sdc1  $f17, (4 * 36 + 8 * 17)($4)
  sdc1  $f18, (4 * 36 + 8 * 18)($4)
  sdc1  $f19, (4 * 36 + 8 * 19)($4)
  sdc1  $f20, (4 * 36 + 8 * 20)($4)
  sdc1  $f21, (4 * 36 + 8 * 21)($4)
  sdc1  $f22, (4 * 36 + 8 * 22)($4)
  sdc1  $f23, (4 * 36 + 8 * 23)($4)
  sdc1  $f24, (4 * 36 + 8 * 24)($4)
  sdc1  $f25, (4 * 36 + 8 * 25)($4)
  sdc1  $f26, (4 * 36 + 8 * 26)($4)
  sdc1  $f27, (4 * 36 + 8 * 27)($4)
  sdc1  $f28, (4 * 36 + 8 * 28)($4)
  sdc1  $f29, (4 * 36 + 8 * 29)($4)
  sdc1  $f30, (4 * 36 + 8 * 30)($4)
  sdc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  jr    $31
  # return UNW_ESUCCESS
  or    $2, $0, $0
  .set pop

# elif defined(__mips64)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0 ($4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .set push
  .set noat
  .set noreorder
  .set nomacro
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    sd $\i, (8 * \i)($4)
  .endr
  # Store return address to pc
  sd    $31, (8 * 32)($4)
#if __mips_isa_rev < 6
  # hi and lo
  mfhi  $8
  sd    $8,  (8 * 33)($4)
  mflo  $8
  sd    $8,  (8 * 34)($4)
#endif
#ifdef __mips_hard_float
  .irp i,FROM_0_TO_31
    sdc1 $f\i, (280+8*\i)($4)
  .endr
#endif
  jr    $31
  # return UNW_ESUCCESS
  or    $2, $0, $0
  .set pop

# elif defined(__mips__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# Just trap for the time being.
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  teq $0, $0

#elif defined(__powerpc64__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
#if defined(_AIX)
DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
#else
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#endif
// store register (GPR)
#define PPC64_STR(n) \
  std   n, (8 * (n + 2))(3)

  // save GPRs
  PPC64_STR(0)
  mflr  0
  std   0, PPC64_OFFS_SRR0(3) // store lr as srr0
  PPC64_STR(1)
  PPC64_STR(4)        // Save r4 first since it will be used for fixing r2.
#if defined(_AIX)
  // The TOC register (r2) was changed by the glue code if unw_getcontext
  // is called from a different module. Save the original TOC register
  // in the context if this is the case.
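  // ("ld 2,40(1)" encodes as 0xe8410028, so XOR-ing the upper halfword with
  // 0xe841 leaves only the displacement 0x28 when the instruction matches.)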
  mflr   4
  lwz    4, 0(4)      // Get the first instruction at the return address.
  xoris  0, 4, 0xe841 // Is it reloading the TOC register "ld 2,40(1)"?
  cmplwi 0, 0x28
  bne    0, LnoR2Fix  // No need to fix up r2 if it is not.
  ld     2, 40(1)     // Use the saved TOC register in the stack.
LnoR2Fix:
#endif
  PPC64_STR(2)
  PPC64_STR(3)
  PPC64_STR(5)
  PPC64_STR(6)
  PPC64_STR(7)
  PPC64_STR(8)
  PPC64_STR(9)
  PPC64_STR(10)
  PPC64_STR(11)
  PPC64_STR(12)
  PPC64_STR(13)
  PPC64_STR(14)
  PPC64_STR(15)
  PPC64_STR(16)
  PPC64_STR(17)
  PPC64_STR(18)
  PPC64_STR(19)
  PPC64_STR(20)
  PPC64_STR(21)
  PPC64_STR(22)
  PPC64_STR(23)
  PPC64_STR(24)
  PPC64_STR(25)
  PPC64_STR(26)
  PPC64_STR(27)
  PPC64_STR(28)
  PPC64_STR(29)
  PPC64_STR(30)
  PPC64_STR(31)

  mfcr  0
  std   0,  PPC64_OFFS_CR(3)
  mfxer 0
  std   0,  PPC64_OFFS_XER(3)
#if defined(_AIX)
  // The LR value saved from the register is not used; initialize it to 0.
  li    0,  0
#else
  mflr  0
#endif
  std   0,  PPC64_OFFS_LR(3)
  mfctr 0
  std   0,  PPC64_OFFS_CTR(3)
  mfvrsave    0
  std   0,  PPC64_OFFS_VRSAVE(3)

#if defined(__VSX__)
  // save VS registers
  // (note that this also saves floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  4, 3, PPC64_OFFS_FP

// store VS register
#ifdef __LITTLE_ENDIAN__
// For little-endian targets, we need a swap since stxvd2x will store the
// register in the incorrect doubleword order.
// FIXME: when supporting targets older than Power9 on LE is no longer required,
//        this can be changed to simply `stxv n, 16 * n(4)`.
#define PPC64_STVS(n)      \
  xxswapd n, n            ;\
  stxvd2x n, 0, 4         ;\
  addi    4, 4, 16
#else
#define PPC64_STVS(n)      \
  stxvd2x n, 0, 4         ;\
  addi    4, 4, 16
#endif

  PPC64_STVS(0)
  PPC64_STVS(1)
  PPC64_STVS(2)
  PPC64_STVS(3)
  PPC64_STVS(4)
  PPC64_STVS(5)
  PPC64_STVS(6)
  PPC64_STVS(7)
  PPC64_STVS(8)
  PPC64_STVS(9)
  PPC64_STVS(10)
  PPC64_STVS(11)
  PPC64_STVS(12)
  PPC64_STVS(13)
  PPC64_STVS(14)
  PPC64_STVS(15)
  PPC64_STVS(16)
  PPC64_STVS(17)
  PPC64_STVS(18)
  PPC64_STVS(19)
  PPC64_STVS(20)
  PPC64_STVS(21)
  PPC64_STVS(22)
  PPC64_STVS(23)
  PPC64_STVS(24)
  PPC64_STVS(25)
  PPC64_STVS(26)
  PPC64_STVS(27)
  PPC64_STVS(28)
  PPC64_STVS(29)
  PPC64_STVS(30)
  PPC64_STVS(31)
  PPC64_STVS(32)
  PPC64_STVS(33)
  PPC64_STVS(34)
  PPC64_STVS(35)
  PPC64_STVS(36)
  PPC64_STVS(37)
  PPC64_STVS(38)
  PPC64_STVS(39)
  PPC64_STVS(40)
  PPC64_STVS(41)
  PPC64_STVS(42)
  PPC64_STVS(43)
  PPC64_STVS(44)
  PPC64_STVS(45)
  PPC64_STVS(46)
  PPC64_STVS(47)
  PPC64_STVS(48)
  PPC64_STVS(49)
  PPC64_STVS(50)
  PPC64_STVS(51)
  PPC64_STVS(52)
  PPC64_STVS(53)
  PPC64_STVS(54)
  PPC64_STVS(55)
  PPC64_STVS(56)
  PPC64_STVS(57)
  PPC64_STVS(58)
  PPC64_STVS(59)
  PPC64_STVS(60)
  PPC64_STVS(61)
  PPC64_STVS(62)
  PPC64_STVS(63)

#else

// store FP register
#define PPC64_STF(n) \
  stfd  n, (PPC64_OFFS_FP + n * 16)(3)

  // save float registers
  PPC64_STF(0)
  PPC64_STF(1)
  PPC64_STF(2)
  PPC64_STF(3)
  PPC64_STF(4)
  PPC64_STF(5)
  PPC64_STF(6)
  PPC64_STF(7)
  PPC64_STF(8)
  PPC64_STF(9)
  PPC64_STF(10)
  PPC64_STF(11)
  PPC64_STF(12)
  PPC64_STF(13)
  PPC64_STF(14)
  PPC64_STF(15)
  PPC64_STF(16)
  PPC64_STF(17)
  PPC64_STF(18)
  PPC64_STF(19)
  PPC64_STF(20)
  PPC64_STF(21)
  PPC64_STF(22)
  PPC64_STF(23)
  PPC64_STF(24)
  PPC64_STF(25)
  PPC64_STF(26)
  PPC64_STF(27)
  PPC64_STF(28)
  PPC64_STF(29)
  PPC64_STF(30)
  PPC64_STF(31)

#if defined(__ALTIVEC__)
  // save vector registers

  // Use 16 bytes below the stack pointer as an
  // aligned buffer to save each vector register.
  // Note that the stack pointer is always 16-byte aligned.
  subi  4, 1, 16

#define PPC64_STV_UNALIGNED(n)             \
  stvx  n, 0, 4                           ;\
  ld    5, 0(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16)(3)     ;\
  ld    5, 8(4)                           ;\
  std   5, (PPC64_OFFS_V + n * 16 + 8)(3)

  PPC64_STV_UNALIGNED(0)
  PPC64_STV_UNALIGNED(1)
  PPC64_STV_UNALIGNED(2)
  PPC64_STV_UNALIGNED(3)
  PPC64_STV_UNALIGNED(4)
  PPC64_STV_UNALIGNED(5)
  PPC64_STV_UNALIGNED(6)
  PPC64_STV_UNALIGNED(7)
  PPC64_STV_UNALIGNED(8)
  PPC64_STV_UNALIGNED(9)
  PPC64_STV_UNALIGNED(10)
  PPC64_STV_UNALIGNED(11)
  PPC64_STV_UNALIGNED(12)
  PPC64_STV_UNALIGNED(13)
  PPC64_STV_UNALIGNED(14)
  PPC64_STV_UNALIGNED(15)
  PPC64_STV_UNALIGNED(16)
  PPC64_STV_UNALIGNED(17)
  PPC64_STV_UNALIGNED(18)
  PPC64_STV_UNALIGNED(19)
  PPC64_STV_UNALIGNED(20)
  PPC64_STV_UNALIGNED(21)
  PPC64_STV_UNALIGNED(22)
  PPC64_STV_UNALIGNED(23)
  PPC64_STV_UNALIGNED(24)
  PPC64_STV_UNALIGNED(25)
  PPC64_STV_UNALIGNED(26)
  PPC64_STV_UNALIGNED(27)
  PPC64_STV_UNALIGNED(28)
  PPC64_STV_UNALIGNED(29)
  PPC64_STV_UNALIGNED(30)
  PPC64_STV_UNALIGNED(31)

#endif
#endif

  li    3,  0   // return UNW_ESUCCESS
  blr


#elif defined(__powerpc__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r3
//
#if defined(_AIX)
DEFINE_LIBUNWIND_FUNCTION_AND_WEAK_ALIAS(__unw_getcontext, unw_getcontext)
#else
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#endif
  stw     0,   8(3)
  mflr    0
  stw     0,   0(3) // store lr as srr0
  stw     1,  12(3)
  stw     4,  24(3) // Save r4 first since it will be used for fixing r2.
#if defined(_AIX)
  // The TOC register (r2) was changed by the glue code if unw_getcontext
  // is called from a different module. Save the original TOC register
  // in the context if this is the case.
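  // ("lwz 2,20(1)" encodes as 0x80410014, so XOR-ing the upper halfword with
  // 0x8041 leaves only the displacement 0x14 when the instruction matches.)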
  mflr    4
  lwz     4,  0(4)      // Get the instruction at the return address.
  xoris   0,  4, 0x8041 // Is it reloading the TOC register "lwz 2,20(1)"?
  cmplwi  0,  0x14
  bne     0,  LnoR2Fix  // No need to fix up r2 if it is not.
  lwz     2,  20(1)     // Use the saved TOC register in the stack.
LnoR2Fix:
#endif
  stw     2,  16(3)
  stw     3,  20(3)
  stw     5,  28(3)
  stw     6,  32(3)
  stw     7,  36(3)
  stw     8,  40(3)
  stw     9,  44(3)
  stw     10, 48(3)
  stw     11, 52(3)
  stw     12, 56(3)
  stw     13, 60(3)
  stw     14, 64(3)
  stw     15, 68(3)
  stw     16, 72(3)
  stw     17, 76(3)
  stw     18, 80(3)
  stw     19, 84(3)
  stw     20, 88(3)
  stw     21, 92(3)
  stw     22, 96(3)
  stw     23,100(3)
  stw     24,104(3)
  stw     25,108(3)
  stw     26,112(3)
  stw     27,116(3)
  stw     28,120(3)
  stw     29,124(3)
  stw     30,128(3)
  stw     31,132(3)

#if defined(__ALTIVEC__)
  // save VRSave register
  mfspr   0, 256
  stw     0, 156(3)
#endif
  // save CR registers
  mfcr    0
  stw     0, 136(3)
#if defined(_AIX)
  // The LR value from the register is not used; initialize it to 0.
  li      0, 0
  stw     0, 144(3)
#endif
  // save CTR register
  mfctr   0
  stw     0, 148(3)

#if !defined(__NO_FPRS__)
  // save float registers
  stfd    0, 160(3)
  stfd    1, 168(3)
  stfd    2, 176(3)
  stfd    3, 184(3)
  stfd    4, 192(3)
  stfd    5, 200(3)
  stfd    6, 208(3)
  stfd    7, 216(3)
  stfd    8, 224(3)
  stfd    9, 232(3)
  stfd    10,240(3)
  stfd    11,248(3)
  stfd    12,256(3)
  stfd    13,264(3)
  stfd    14,272(3)
  stfd    15,280(3)
  stfd    16,288(3)
  stfd    17,296(3)
  stfd    18,304(3)
  stfd    19,312(3)
  stfd    20,320(3)
  stfd    21,328(3)
  stfd    22,336(3)
  stfd    23,344(3)
  stfd    24,352(3)
  stfd    25,360(3)
  stfd    26,368(3)
  stfd    27,376(3)
  stfd    28,384(3)
  stfd    29,392(3)
  stfd    30,400(3)
  stfd    31,408(3)
#endif

#if defined(__ALTIVEC__)
  // save vector registers

  subi    4, 1, 16
  rlwinm  4, 4, 0, 0, 27  // mask low 4-bits
  // r4 is now a 16-byte aligned pointer into the red zone

#define SAVE_VECTOR_UNALIGNED(_vec, _offset) \
  stvx    _vec, 0, 4               SEPARATOR \
  lwz     5, 0(4)                  SEPARATOR \
  stw     5, _offset(3)            SEPARATOR \
  lwz     5, 4(4)                  SEPARATOR \
  stw     5, _offset+4(3)          SEPARATOR \
  lwz     5, 8(4)                  SEPARATOR \
  stw     5, _offset+8(3)          SEPARATOR \
  lwz     5, 12(4)                 SEPARATOR \
  stw     5, _offset+12(3)

  SAVE_VECTOR_UNALIGNED( 0, 424+0x000)
  SAVE_VECTOR_UNALIGNED( 1, 424+0x010)
  SAVE_VECTOR_UNALIGNED( 2, 424+0x020)
  SAVE_VECTOR_UNALIGNED( 3, 424+0x030)
  SAVE_VECTOR_UNALIGNED( 4, 424+0x040)
  SAVE_VECTOR_UNALIGNED( 5, 424+0x050)
  SAVE_VECTOR_UNALIGNED( 6, 424+0x060)
  SAVE_VECTOR_UNALIGNED( 7, 424+0x070)
  SAVE_VECTOR_UNALIGNED( 8, 424+0x080)
  SAVE_VECTOR_UNALIGNED( 9, 424+0x090)
  SAVE_VECTOR_UNALIGNED(10, 424+0x0A0)
  SAVE_VECTOR_UNALIGNED(11, 424+0x0B0)
  SAVE_VECTOR_UNALIGNED(12, 424+0x0C0)
  SAVE_VECTOR_UNALIGNED(13, 424+0x0D0)
  SAVE_VECTOR_UNALIGNED(14, 424+0x0E0)
  SAVE_VECTOR_UNALIGNED(15, 424+0x0F0)
  SAVE_VECTOR_UNALIGNED(16, 424+0x100)
  SAVE_VECTOR_UNALIGNED(17, 424+0x110)
  SAVE_VECTOR_UNALIGNED(18, 424+0x120)
  SAVE_VECTOR_UNALIGNED(19, 424+0x130)
  SAVE_VECTOR_UNALIGNED(20, 424+0x140)
  SAVE_VECTOR_UNALIGNED(21, 424+0x150)
  SAVE_VECTOR_UNALIGNED(22, 424+0x160)
  SAVE_VECTOR_UNALIGNED(23, 424+0x170)
  SAVE_VECTOR_UNALIGNED(24, 424+0x180)
  SAVE_VECTOR_UNALIGNED(25, 424+0x190)
  SAVE_VECTOR_UNALIGNED(26, 424+0x1A0)
  SAVE_VECTOR_UNALIGNED(27, 424+0x1B0)
  SAVE_VECTOR_UNALIGNED(28, 424+0x1C0)
  SAVE_VECTOR_UNALIGNED(29, 424+0x1D0)
  SAVE_VECTOR_UNALIGNED(30, 424+0x1E0)
  SAVE_VECTOR_UNALIGNED(31, 424+0x1F0)
#endif

  li      3, 0  // return UNW_ESUCCESS
  blr


#elif defined(__aarch64__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  stp    x0, x1,  [x0, #0x000]
  stp    x2, x3,  [x0, #0x010]
  stp    x4, x5,  [x0, #0x020]
  stp    x6, x7,  [x0, #0x030]
  stp    x8, x9,  [x0, #0x040]
  stp    x10,x11, [x0, #0x050]
  stp    x12,x13, [x0, #0x060]
  stp    x14,x15, [x0, #0x070]
  stp    x16,x17, [x0, #0x080]
  stp    x18,x19, [x0, #0x090]
  stp    x20,x21, [x0, #0x0A0]
  stp    x22,x23, [x0, #0x0B0]
  stp    x24,x25, [x0, #0x0C0]
  stp    x26,x27, [x0, #0x0D0]
  stp    x28,x29, [x0, #0x0E0]
  str    x30,     [x0, #0x0F0]
  mov    x1,sp
  str    x1,      [x0, #0x0F8]
  str    x30,     [x0, #0x100]    // store return address as pc
  // skip cpsr
  stp    d0, d1,  [x0, #0x110]
  stp    d2, d3,  [x0, #0x120]
  stp    d4, d5,  [x0, #0x130]
  stp    d6, d7,  [x0, #0x140]
  stp    d8, d9,  [x0, #0x150]
  stp    d10,d11, [x0, #0x160]
  stp    d12,d13, [x0, #0x170]
  stp    d14,d15, [x0, #0x180]
  stp    d16,d17, [x0, #0x190]
  stp    d18,d19, [x0, #0x1A0]
  stp    d20,d21, [x0, #0x1B0]
  stp    d22,d23, [x0, #0x1C0]
  stp    d24,d25, [x0, #0x1D0]
  stp    d26,d27, [x0, #0x1E0]
  stp    d28,d29, [x0, #0x1F0]
  str    d30,     [x0, #0x200]
  str    d31,     [x0, #0x208]
  mov    x0, #0                   // return UNW_ESUCCESS
  ret

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
#if (__ARM_ARCH_ISA_THUMB == 2)
  .syntax unified
#endif
  .thumb
#endif

@
@ extern int __unw_getcontext(unw_context_t* thread_state)
@
@ On entry:
@  thread_state pointer is in r0
@
@ Per EHABI #4.7 this only saves the core integer registers.
@ EHABI #7.4.5 notes that in general all VRS registers should be restored;
@ however, this is very hard to do for VFP registers because the library does
@ not know how many registers the architecture implements.
@ Instead, VFP registers are demand-saved by logic external to __unw_getcontext.
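@ (The saveVFPWithFSTMD, saveVFPWithFSTMX and saveVFPv3 routines below
@ provide that demand saving.)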
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  stm r0!, {r0-r7}
  mov r1, r8
  mov r2, r9
  mov r3, r10
  stm r0!, {r1-r3}
  mov r1, r11
  mov r2, sp
  mov r3, lr
  str r1, [r0, #0]   @ r11
  @ r12 does not need storing, it is the intra-procedure-call scratch register
  str r2, [r0, #8]   @ sp
  str r3, [r0, #12]  @ lr
  str r3, [r0, #16]  @ store return address as pc
  @ T1 does not have a non-cpsr-clobbering register-zeroing instruction.
  @ It is safe to use here though because we are about to return, and cpsr is
  @ not expected to be preserved.
  movs r0, #0        @ return UNW_ESUCCESS
#else
  @ 32-bit Thumb-2 restrictions for stm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) cannot be in the list in an STM instruction
  stm r0, {r0-r12}
  str sp, [r0, #52]
  str lr, [r0, #56]
  str lr, [r0, #60]  @ store return address as pc
  mov r0, #0         @ return UNW_ESUCCESS
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMDEPv)
  vstmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPWithFSTMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveVFPWithFSTMXEPv)
  vstmia r0, {d0-d15} @ fstmiax is deprecated in ARMv7+ and now behaves like vstmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveVFPv3EPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do
  @ not want the compiler to generate instructions that access those registers),
  @ but this code is only reached if the personality routine needs these
  @ registers, and needing them implies they are actually available on the
  @ target, so it is ok to execute.
  @ So, generate the instructions using the corresponding coprocessor mnemonic.
  vstmia r0, {d16-d31}
  JMP(lr)

#if defined(_LIBUNWIND_ARM_WMMX)

@
@ static void libunwind::Registers_arm::saveiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm9saveiWMMXEPv)
  stcl p1, cr0, [r0], #8  @ wstrd wR0, [r0], #8
  stcl p1, cr1, [r0], #8  @ wstrd wR1, [r0], #8
  stcl p1, cr2, [r0], #8  @ wstrd wR2, [r0], #8
  stcl p1, cr3, [r0], #8  @ wstrd wR3, [r0], #8
  stcl p1, cr4, [r0], #8  @ wstrd wR4, [r0], #8
  stcl p1, cr5, [r0], #8  @ wstrd wR5, [r0], #8
  stcl p1, cr6, [r0], #8  @ wstrd wR6, [r0], #8
  stcl p1, cr7, [r0], #8  @ wstrd wR7, [r0], #8
  stcl p1, cr8, [r0], #8  @ wstrd wR8, [r0], #8
  stcl p1, cr9, [r0], #8  @ wstrd wR9, [r0], #8
  stcl p1, cr10, [r0], #8  @ wstrd wR10, [r0], #8
  stcl p1, cr11, [r0], #8  @ wstrd wR11, [r0], #8
  stcl p1, cr12, [r0], #8  @ wstrd wR12, [r0], #8
  stcl p1, cr13, [r0], #8  @ wstrd wR13, [r0], #8
  stcl p1, cr14, [r0], #8  @ wstrd wR14, [r0], #8
  stcl p1, cr15, [r0], #8  @ wstrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::saveiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm16saveiWMMXControlEPj)
  stc2 p1, cr8, [r0], #4  @ wstrw wCGR0, [r0], #4
  stc2 p1, cr9, [r0], #4  @ wstrw wCGR1, [r0], #4
  stc2 p1, cr10, [r0], #4  @ wstrw wCGR2, [r0], #4
  stc2 p1, cr11, [r0], #4  @ wstrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in r3
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  l.sw       0(r3), r0
  l.sw       4(r3), r1
  l.sw       8(r3), r2
  l.sw      12(r3), r3
  l.sw      16(r3), r4
  l.sw      20(r3), r5
  l.sw      24(r3), r6
  l.sw      28(r3), r7
  l.sw      32(r3), r8
  l.sw      36(r3), r9
  l.sw      40(r3), r10
  l.sw      44(r3), r11
  l.sw      48(r3), r12
  l.sw      52(r3), r13
  l.sw      56(r3), r14
  l.sw      60(r3), r15
  l.sw      64(r3), r16
  l.sw      68(r3), r17
  l.sw      72(r3), r18
  l.sw      76(r3), r19
  l.sw      80(r3), r20
  l.sw      84(r3), r21
  l.sw      88(r3), r22
  l.sw      92(r3), r23
  l.sw      96(r3), r24
  l.sw     100(r3), r25
  l.sw     104(r3), r26
  l.sw     108(r3), r27
  l.sw     112(r3), r28
  l.sw     116(r3), r29
  l.sw     120(r3), r30
  l.sw     124(r3), r31
  # store ra to pc
  l.sw     128(r3), r9
  # zero epcr
  l.sw     132(r3), r0

#elif defined(__hexagon__)
#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in r0
#
#define OFFSET(offset) (offset/4)
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  memw(r0+#32) = r8
  memw(r0+#36) = r9
  memw(r0+#40) = r10
  memw(r0+#44) = r11

  memw(r0+#48) = r12
  memw(r0+#52) = r13
  memw(r0+#56) = r14
  memw(r0+#60) = r15

  memw(r0+#64) = r16
  memw(r0+#68) = r17
  memw(r0+#72) = r18
  memw(r0+#76) = r19

  memw(r0+#80) = r20
  memw(r0+#84) = r21
  memw(r0+#88) = r22
  memw(r0+#92) = r23

  memw(r0+#96) = r24
  memw(r0+#100) = r25
  memw(r0+#104) = r26
  memw(r0+#108) = r27

  memw(r0+#112) = r28
  memw(r0+#116) = r29
  memw(r0+#120) = r30
  memw(r0+#124) = r31
  r1 = c4   // Predicate register
  memw(r0+#128) = r1
  r1 = memw(r30)           // *FP == Saved FP
  r1 = r31
  memw(r0+#132) = r1

  jumpr r31

#elif defined(__sparc__) && defined(__arch64__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in %o0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .register %g2, #scratch
  .register %g3, #scratch
  .register %g6, #scratch
  .register %g7, #scratch
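  # declare the global registers used below as scratch so the assembler accepts them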
  stx  %g1, [%o0 + 0x08]
  stx  %g2, [%o0 + 0x10]
  stx  %g3, [%o0 + 0x18]
  stx  %g4, [%o0 + 0x20]
  stx  %g5, [%o0 + 0x28]
  stx  %g6, [%o0 + 0x30]
  stx  %g7, [%o0 + 0x38]
  stx  %o0, [%o0 + 0x40]
  stx  %o1, [%o0 + 0x48]
  stx  %o2, [%o0 + 0x50]
  stx  %o3, [%o0 + 0x58]
  stx  %o4, [%o0 + 0x60]
  stx  %o5, [%o0 + 0x68]
  stx  %o6, [%o0 + 0x70]
  stx  %o7, [%o0 + 0x78]
  stx  %l0, [%o0 + 0x80]
  stx  %l1, [%o0 + 0x88]
  stx  %l2, [%o0 + 0x90]
  stx  %l3, [%o0 + 0x98]
  stx  %l4, [%o0 + 0xa0]
  stx  %l5, [%o0 + 0xa8]
  stx  %l6, [%o0 + 0xb0]
  stx  %l7, [%o0 + 0xb8]
  stx  %i0, [%o0 + 0xc0]
  stx  %i1, [%o0 + 0xc8]
  stx  %i2, [%o0 + 0xd0]
  stx  %i3, [%o0 + 0xd8]
  stx  %i4, [%o0 + 0xe0]
  stx  %i5, [%o0 + 0xe8]
  stx  %i6, [%o0 + 0xf0]
  stx  %i7, [%o0 + 0xf8]

  # save StackGhost cookie
  mov  %i7, %g4
  save %sp, -176, %sp
  # register window flush necessary even without StackGhost
  flushw
  restore
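  # %sp + 2047 (the V9 stack bias) + 0x78 is the %i7 slot of the register window save area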
  ldx  [%sp + 2047 + 0x78], %g5
  xor  %g4, %g5, %g4
  stx  %g4, [%o0 + 0x100]
  retl
  # return UNW_ESUCCESS
   clr %o0

#elif defined(__sparc__)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in o0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
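  # flush the register windows to the stack (software trap 3)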
  ta 3
  add %o7, 8, %o7
  std %g0, [%o0 +   0]
  std %g2, [%o0 +   8]
  std %g4, [%o0 +  16]
  std %g6, [%o0 +  24]
  std %o0, [%o0 +  32]
  std %o2, [%o0 +  40]
  std %o4, [%o0 +  48]
  std %o6, [%o0 +  56]
  std %l0, [%o0 +  64]
  std %l2, [%o0 +  72]
  std %l4, [%o0 +  80]
  std %l6, [%o0 +  88]
  std %i0, [%o0 +  96]
  std %i2, [%o0 + 104]
  std %i4, [%o0 + 112]
  std %i6, [%o0 + 120]
  jmp %o7
   clr %o0                   // return UNW_ESUCCESS

#elif defined(__riscv)

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in a0
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  ISTORE    x1, (RISCV_ISIZE * 0)(a0) // store ra as pc
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    ISTORE x\i, (RISCV_ISIZE * \i)(a0)
  .endr

# if defined(__riscv_flen)
  .irp i,FROM_0_TO_31
    FSTORE f\i, (RISCV_FOFFSET + RISCV_FSIZE * \i)(a0)
  .endr
# endif

  li     a0, 0  // return UNW_ESUCCESS
  ret           // jump to ra

#elif defined(__s390x__)

//
// extern int __unw_getcontext(unw_context_t* thread_state)
//
// On entry:
//  thread_state pointer is in r2
//
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)

  // Save GPRs
  stmg %r0, %r15, 16(%r2)

  // Save PSWM
  epsw %r0, %r1
  stm %r0, %r1, 0(%r2)

  // Store return address as PSWA
  stg %r14, 8(%r2)

  // Save FPRs
  .irp i,FROM_0_TO_15
    std %f\i, (144+8*\i)(%r2)
  .endr

  // Return UNW_ESUCCESS
  lghi %r2, 0
  br %r14

#elif defined(__loongarch__) && __loongarch_grlen == 64

#
# extern int __unw_getcontext(unw_context_t* thread_state)
#
# On entry:
#  thread_state pointer is in $a0($r4)
#
DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
  .irp i,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
    st.d $r\i, $a0, (8*\i)
  .endr
  st.d    $r1,  $a0, (8 * 32) // store $ra to pc

# if __loongarch_frlen == 64
  .irp i,FROM_0_TO_31
    fst.d $f\i, $a0, (8 * 33 + 8 * \i)
  .endr
# endif

  move     $a0, $zero  // UNW_ESUCCESS
  jr       $ra

#endif

  WEAK_ALIAS(__unw_getcontext, unw_getcontext)

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE
