1/* Support functions for the unwinder.
2   Copyright (C) 2003-2015 Free Software Foundation, Inc.
3   Contributed by Paul Brook
4
5   This file is free software; you can redistribute it and/or modify it
6   under the terms of the GNU General Public License as published by the
7   Free Software Foundation; either version 3, or (at your option) any
8   later version.
9
10   This file is distributed in the hope that it will be useful, but
11   WITHOUT ANY WARRANTY; without even the implied warranty of
12   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13   General Public License for more details.
14
15   Under Section 7 of GPL version 3, you are granted additional
16   permissions described in the GCC Runtime Library Exception, version
17   3.1, as published by the Free Software Foundation.
18
19   You should have received a copy of the GNU General Public License and
20   a copy of the GCC Runtime Library Exception along with this program;
21   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
22   <http://www.gnu.org/licenses/>.  */
23
/* An executable stack is *not* required for these functions.  */
#if defined(__ELF__) && defined(__linux__)
/* Emit an empty .note.GNU-stack section so the linker does not mark
   objects containing this file as needing an executable stack.  */
.section .note.GNU-stack,"",%progbits
.previous
#endif
29
#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file.  */
/* These are ARM EABI build-attribute tags recorded in the object file;
   they describe this code's stack-alignment contract to the linker.  */
	/* Tag_ABI_align_needed: This code does not require 8-byte
	   alignment from the caller.  */
	/* .eabi_attribute 24, 0  -- default setting.  */
	/* Tag_ABI_align_preserved: This code preserves 8-byte
	   alignment in any callee.  */
	.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */
39
40#ifndef __symbian__
41
42#include "lib1funcs.S"
43
/* UNPREFIX name: export SYM (name) as a global alias (via EQUIV) for the
   double-underscore implementation symbol SYM (__name), so callers can
   use the unprefixed name.  */
.macro UNPREFIX name
	.global SYM (\name)
	EQUIV SYM (\name), SYM (__\name)
.endm
48
#if (__ARM_ARCH__ == 4)
/* Some coprocessors require armv5.  We know this code will never be run on
   other cpus.  Tell gas to allow armv5, but only mark the objects as armv4.
 */
/* .object_arch overrides the architecture gas records in the object's
   attributes, without restricting which instructions may be assembled.  */
.arch armv5t
#ifdef __ARM_ARCH_4T__
.object_arch armv4t
#else
.object_arch armv4
#endif
#endif
60
61#ifdef __ARM_ARCH_6M__
62
/* restore_core_regs (sixteen-word block in r0)

   r0 points to a 16-word block holding r0-r15 at offsets 0, 4, ..., 60.
   Upload these values to the actual core state and resume at the saved
   pc.  ARMv6-M / Thumb-1 version: ldm cannot load sp/lr/pc or the high
   registers directly, so everything is staged through low registers.  */
FUNC_START restore_core_regs
	/* r1 = &block[13]: address of the saved {sp, lr, pc} triple.  */
	mov r1, r0
	add r1, r1, #52
	ldmia r1!, {r3, r4, r5}	/* r3 = sp, r4 = lr, r5 = pc.  */
	/* Store the target pc one word below the target sp so it can be
	   popped after the stack switch; ip = target_sp - 4.  */
	sub r3, r3, #4
	mov ip, r3
	str r5, [r3]
	mov lr, r4
	/* Restore r8-r11 (offsets 32-44) via low registers.  */
	mov r1, r0
	add r1, r1, #32
	ldmia r1!, {r2, r3, r4, r5}
	mov r8, r2
	mov r9, r3
	mov sl, r4
	mov fp, r5
	/* Restore r2-r7 (offsets 8-28).  */
	mov r1, r0
	add r1, r1, #8
	ldmia r1!, {r2, r3, r4, r5, r6, r7}
	/* r1 and r0 last, since r0 is still the base pointer.  */
	ldr r1, [r0, #4]
	ldr r0, [r0]
	/* Switch to the target stack and pop the pc stored above.  */
	mov sp, ip
	pop {pc}
	FUNC_END restore_core_regs
	UNPREFIX restore_core_regs
90
/* ARMV6M does not have coprocessors, so the VFP and iWMMXt save/restore
   hooks below are stubs that return immediately; the unwinder should
   never actually call them on this architecture.  */
FUNC_START gnu_Unwind_Restore_VFP
	RET

/* Store VFP registers d0-d15 to the address in r0.  */
FUNC_START gnu_Unwind_Save_VFP
	RET

/* Load VFP registers d0-d15 from the address in r0.
   Use this to load from FSTMD format.  */
FUNC_START gnu_Unwind_Restore_VFP_D
	RET

/* Store VFP registers d0-d15 to the address in r0.
   Use this to store in FLDMD format.  */
FUNC_START gnu_Unwind_Save_VFP_D
	RET

/* Load VFP registers d16-d31 from the address in r0.
   Use this to load from FSTMD (=VSTM) format.  Needs VFPv3.  */
FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31
	RET

/* Store VFP registers d16-d31 to the address in r0.
   Use this to store in FLDMD (=VLDM) format.  Needs VFPv3.  */
FUNC_START gnu_Unwind_Save_VFP_D_16_to_31
	RET

/* iWMMXt data register hooks -- likewise unreachable stubs here.  */
FUNC_START gnu_Unwind_Restore_WMMXD
	RET

FUNC_START gnu_Unwind_Save_WMMXD
	RET

/* iWMMXt control register hooks -- likewise unreachable stubs here.  */
FUNC_START gnu_Unwind_Restore_WMMXC
	RET

FUNC_START gnu_Unwind_Save_WMMXC
	RET
130
/* UNWIND_WRAPPER name nargs: define entry point \name that captures the
   current core registers into a phase2_vrs-shaped block on the stack,
   then calls the real implementation __gnu\name, passing a pointer to
   the block in r<nargs> (the register after the routine's \nargs
   ordinary arguments).  ARMv6-M / Thumb-1 version.  */
.macro  UNWIND_WRAPPER name nargs
	FUNC_START \name
	/* Create a phase2_vrs structure.  */
	/* Save r0 in the PC slot so we can use it as a scratch register.  */
	push {r0}
	add r0, sp, #4
	push {r0, lr} /* Push original SP and LR.  */
	/* Make space for r8-r12.  */
	sub sp, sp, #20
	/* Save low registers.  */
	push {r0, r1, r2, r3, r4, r5, r6, r7}
	/* Save high registers.  */
	add r0, sp, #32
	mov r1, r8
	mov r2, r9
	mov r3, sl
	mov r4, fp
	mov r5, ip
	stmia r0!, {r1, r2, r3, r4, r5}
	/* Restore original low register values.  */
	add r0, sp, #4
	ldmia r0!, {r1, r2, r3, r4, r5}
	/* Restore original r0 from the PC slot saved first, and store it
	   into the block's r0 slot.  */
	ldr r0, [sp, #60]
	str r0, [sp]
	/* Demand-save flags, plus an extra word for alignment.  */
	mov r3, #0
	push {r2, r3}
	/* Point r\nargs at the block.  Pass r[0..nargs) unchanged.  */
	add r\nargs, sp, #4

	bl SYM (__gnu\name)

	/* Reload the saved lr from the block and unwind the 18-word
	   (72-byte) frame built above, then return through lr.  */
	ldr r3, [sp, #64]
	add sp, sp, #72
	bx r3

	FUNC_END \name
	UNPREFIX \name
.endm
171
172#else /* !__ARM_ARCH_6M__ */
173
/* restore_core_regs (sixteen-word block in r0)

   r0 points to a 16-word block holding r0-r15 at offsets 0, 4, ..., 60.
   Upload these values to the actual core state and resume at the saved
   pc.  ARM / Thumb-2 version.  */
ARM_FUNC_START restore_core_regs
	/* We must use sp as the base register when restoring sp.  Push the
	   last 3 registers onto the top of the current stack to achieve
	   this.  */
	add r1, r0, #52
	ldmia r1, {r3, r4, r5}  /* {sp, lr, pc}.  */
#if defined(__thumb2__)
	/* Thumb-2 doesn't allow sp in a load-multiple instruction, so push
	   the target address onto the target stack.  This is safe as
	   we're always returning to somewhere further up the call stack.  */
	mov ip, r3
	mov lr, r4
	str r5, [ip, #-4]!	/* ip = target_sp - 4, holding target pc.  */
#elif defined(__INTERWORKING__)
	/* Restore pc into ip.  */
	mov r2, r5
	stmfd sp!, {r2, r3, r4}	/* {pc, sp, lr} on the current stack.  */
#else
	stmfd sp!, {r3, r4, r5}	/* {sp, lr, pc} on the current stack.  */
#endif
	/* Don't bother restoring ip.  */
	ldmia r0, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp}
#if defined(__thumb2__)
	/* Pop the return address off the target stack.  */
	mov sp, ip
	pop {pc}
#elif defined(__INTERWORKING__)
	/* Pop the three registers we pushed earlier.  Note: no writeback;
	   sp is overwritten by the loaded value.  bx allows an ARM/Thumb
	   state change.  */
	ldmfd sp, {ip, sp, lr}
	bx ip
#else
	ldmfd sp, {sp, lr, pc}
#endif
	FUNC_END restore_core_regs
	UNPREFIX restore_core_regs
211
/* VFP save/restore helpers.  Each transfers d0-d15 (or d16-d31) between
   the VFP register file and the buffer addressed by r0.  The generic
   coprocessor encodings (p11 = VFP double precision) are used so the
   file assembles even for soft-float multilibs; the equivalent VFP
   mnemonic is noted beside each instruction.  */

/* Load VFP registers d0-d15 from the address in r0.
   Use this to load from FSTMX format.  */
ARM_FUNC_START gnu_Unwind_Restore_VFP
	/* Use the generic coprocessor form so that gas doesn't complain
	   on soft-float targets.  */
	ldc   p11,cr0,[r0],{0x21} /* fldmiax r0, {d0-d15} */
	RET

/* Store VFP registers d0-d15 to the address in r0.
   Use this to store in FSTMX format.  */
ARM_FUNC_START gnu_Unwind_Save_VFP
	/* Use the generic coprocessor form so that gas doesn't complain
	   on soft-float targets.  */
	stc   p11,cr0,[r0],{0x21} /* fstmiax r0, {d0-d15} */
	RET

/* Load VFP registers d0-d15 from the address in r0.
   Use this to load from FSTMD format.  */
ARM_FUNC_START gnu_Unwind_Restore_VFP_D
	ldc   p11,cr0,[r0],{0x20} /* fldmiad r0, {d0-d15} */
	RET

/* Store VFP registers d0-d15 to the address in r0.
   Use this to store in FLDMD format.  */
ARM_FUNC_START gnu_Unwind_Save_VFP_D
	stc   p11,cr0,[r0],{0x20} /* fstmiad r0, {d0-d15} */
	RET

/* Load VFP registers d16-d31 from the address in r0.
   Use this to load from FSTMD (=VSTM) format.  Needs VFPv3.  */
ARM_FUNC_START gnu_Unwind_Restore_VFP_D_16_to_31
	ldcl  p11,cr0,[r0],{0x20} /* vldm r0, {d16-d31} */
	RET

/* Store VFP registers d16-d31 to the address in r0.
   Use this to store in FLDMD (=VLDM) format.  Needs VFPv3.  */
ARM_FUNC_START gnu_Unwind_Save_VFP_D_16_to_31
	stcl  p11,cr0,[r0],{0x20} /* vstm r0, {d16-d31} */
	RET
251
/* Load the sixteen 64-bit iWMMXt data registers wr0-wr15 from the
   buffer at r0, advancing r0 by 8 bytes per register.  */
ARM_FUNC_START gnu_Unwind_Restore_WMMXD
	/* Use the generic coprocessor form so that gas doesn't complain
	   on non-iWMMXt targets.  */
	ldcl  p1, cr0, [r0], #8 /* wldrd wr0, [r0], #8 */
	ldcl  p1, cr1, [r0], #8 /* wldrd wr1, [r0], #8 */
	ldcl  p1, cr2, [r0], #8 /* wldrd wr2, [r0], #8 */
	ldcl  p1, cr3, [r0], #8 /* wldrd wr3, [r0], #8 */
	ldcl  p1, cr4, [r0], #8 /* wldrd wr4, [r0], #8 */
	ldcl  p1, cr5, [r0], #8 /* wldrd wr5, [r0], #8 */
	ldcl  p1, cr6, [r0], #8 /* wldrd wr6, [r0], #8 */
	ldcl  p1, cr7, [r0], #8 /* wldrd wr7, [r0], #8 */
	ldcl  p1, cr8, [r0], #8 /* wldrd wr8, [r0], #8 */
	ldcl  p1, cr9, [r0], #8 /* wldrd wr9, [r0], #8 */
	ldcl  p1, cr10, [r0], #8 /* wldrd wr10, [r0], #8 */
	ldcl  p1, cr11, [r0], #8 /* wldrd wr11, [r0], #8 */
	ldcl  p1, cr12, [r0], #8 /* wldrd wr12, [r0], #8 */
	ldcl  p1, cr13, [r0], #8 /* wldrd wr13, [r0], #8 */
	ldcl  p1, cr14, [r0], #8 /* wldrd wr14, [r0], #8 */
	ldcl  p1, cr15, [r0], #8 /* wldrd wr15, [r0], #8 */
	RET
272
/* Store the sixteen 64-bit iWMMXt data registers wr0-wr15 to the
   buffer at r0, advancing r0 by 8 bytes per register.  */
ARM_FUNC_START gnu_Unwind_Save_WMMXD
	/* Use the generic coprocessor form so that gas doesn't complain
	   on non-iWMMXt targets.  */
	stcl  p1, cr0, [r0], #8 /* wstrd wr0, [r0], #8 */
	stcl  p1, cr1, [r0], #8 /* wstrd wr1, [r0], #8 */
	stcl  p1, cr2, [r0], #8 /* wstrd wr2, [r0], #8 */
	stcl  p1, cr3, [r0], #8 /* wstrd wr3, [r0], #8 */
	stcl  p1, cr4, [r0], #8 /* wstrd wr4, [r0], #8 */
	stcl  p1, cr5, [r0], #8 /* wstrd wr5, [r0], #8 */
	stcl  p1, cr6, [r0], #8 /* wstrd wr6, [r0], #8 */
	stcl  p1, cr7, [r0], #8 /* wstrd wr7, [r0], #8 */
	stcl  p1, cr8, [r0], #8 /* wstrd wr8, [r0], #8 */
	stcl  p1, cr9, [r0], #8 /* wstrd wr9, [r0], #8 */
	stcl  p1, cr10, [r0], #8 /* wstrd wr10, [r0], #8 */
	stcl  p1, cr11, [r0], #8 /* wstrd wr11, [r0], #8 */
	stcl  p1, cr12, [r0], #8 /* wstrd wr12, [r0], #8 */
	stcl  p1, cr13, [r0], #8 /* wstrd wr13, [r0], #8 */
	stcl  p1, cr14, [r0], #8 /* wstrd wr14, [r0], #8 */
	stcl  p1, cr15, [r0], #8 /* wstrd wr15, [r0], #8 */
	RET
293
/* Load the four 32-bit iWMMXt control registers wcgr0-wcgr3 from the
   buffer at r0, advancing r0 by 4 bytes per register.  */
ARM_FUNC_START gnu_Unwind_Restore_WMMXC
	/* Use the generic coprocessor form so that gas doesn't complain
	   on non-iWMMXt targets.  */
	ldc2  p1, cr8, [r0], #4 /* wldrw wcgr0, [r0], #4 */
	ldc2  p1, cr9, [r0], #4 /* wldrw wcgr1, [r0], #4 */
	ldc2  p1, cr10, [r0], #4 /* wldrw wcgr2, [r0], #4 */
	ldc2  p1, cr11, [r0], #4 /* wldrw wcgr3, [r0], #4 */
	RET
302
/* Store the four 32-bit iWMMXt control registers wcgr0-wcgr3 to the
   buffer at r0, advancing r0 by 4 bytes per register.  */
ARM_FUNC_START gnu_Unwind_Save_WMMXC
	/* Use the generic coprocessor form so that gas doesn't complain
	   on non-iWMMXt targets.  */
	stc2  p1, cr8, [r0], #4 /* wstrw wcgr0, [r0], #4 */
	stc2  p1, cr9, [r0], #4 /* wstrw wcgr1, [r0], #4 */
	stc2  p1, cr10, [r0], #4 /* wstrw wcgr2, [r0], #4 */
	stc2  p1, cr11, [r0], #4 /* wstrw wcgr3, [r0], #4 */
	RET
311
/* Wrappers to save core registers, then call the real routine.   */

/* UNWIND_WRAPPER name nargs: define entry point \name that captures the
   current core registers into a phase2_vrs-shaped block on the stack,
   then calls __gnu\name with a pointer to the block in r<nargs> (the
   register after the routine's \nargs ordinary arguments).  ARM /
   Thumb-2 version.  */
.macro  UNWIND_WRAPPER name nargs
	ARM_FUNC_START \name
	/* Create a phase2_vrs structure.  */
	/* Split reg push in two to ensure the correct value for sp.  */
#if defined(__thumb2__)
	/* Thumb-2 doesn't allow sp or pc in a store-multiple, so build
	   the {sp, lr, pc} triple by hand; the pc slot's value is never
	   read.  */
	mov ip, sp
	push {lr} /* PC is ignored.  */
	push {ip, lr} /* Push original SP and LR.  */
#else
	stmfd sp!, {sp, lr, pc}
#endif
	stmfd sp!, {r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, sl, fp, ip}

	/* Demand-save flags, plus an extra word for alignment.  */
	mov r3, #0
	stmfd sp!, {r2, r3}

	/* Point r\nargs at the block.  Pass r[0..nargs) unchanged.  */
	add r\nargs, sp, #4
#if defined(__thumb__) && !defined(__thumb2__)
	/* Switch back to thumb mode to avoid interworking hassle.  */
	adr ip, .L1_\name
	orr ip, ip, #1
	bx ip
	.thumb
.L1_\name:
	bl SYM (__gnu\name) __PLT__
	/* Reload the saved lr from the block, pop the 18-word (72-byte)
	   frame, and return through r3 (pop {pc} can't be used: lr was
	   saved mid-frame, not on top).  */
	ldr r3, [sp, #64]
	add sp, #72
	bx r3
#else
	bl SYM (__gnu\name) __PLT__
	/* Reload the saved lr, pop the 72-byte frame, and return.  */
	ldr lr, [sp, #64]
	add sp, sp, #72
	RET
#endif
	FUNC_END \name
	UNPREFIX \name
.endm
353
354#endif /* !__ARM_ARCH_6M__ */
355
/* Instantiate the public entry points.  The second operand is the
   number of ordinary arguments each routine takes; the register-state
   block pointer is passed in the following argument register (see
   UNWIND_WRAPPER above).  */
UNWIND_WRAPPER _Unwind_RaiseException 1
UNWIND_WRAPPER _Unwind_Resume 1
UNWIND_WRAPPER _Unwind_Resume_or_Rethrow 1
UNWIND_WRAPPER _Unwind_ForcedUnwind 3
UNWIND_WRAPPER _Unwind_Backtrace 2
361
362#endif  /* ndef __symbian__ */
363