#if __i386__
	.text
	.globl __ZN9libunwind13Registers_x866jumptoEv
	.private_extern __ZN9libunwind13Registers_x866jumptoEv
__ZN9libunwind13Registers_x866jumptoEv:
#
# void libunwind::Registers_x86::jumpto()
#
# On entry:
#   +-----------------------+
#   | thread_state pointer  |
#   +-----------------------+
#   | return address        |
#   +-----------------------+   <-- SP
#
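# Strategy: %esp itself is about to be replaced, so the new eax and eip
# values are first written just below the top of the target stack; once
# every other register is restored and %esp is switched, a pop and a ret
# consume them.  Thread-state offsets as used below: 0 eax, 4 ebx, 8 ecx,
# 12 edx, 16 edi, 20 esi, 24 ebp, 28 esp, 40 eip (32/36 are presumably
# ss/eflags, per the "skip" comments further down).
#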
	movl	 4(%esp), %eax
	# stage eax and the return address on the new stack location
	movl	28(%eax), %edx # edx holds new stack pointer
	subl	$8,%edx
	movl	%edx, 28(%eax)
	movl	0(%eax), %ebx
	movl	%ebx, 0(%edx)
	movl	40(%eax), %ebx
	movl	%ebx, 4(%edx)
	# eip and eax now sit where the new stack top will be
	# restore all registers
	movl	 4(%eax), %ebx
	movl	 8(%eax), %ecx
	movl	12(%eax), %edx
	movl	16(%eax), %edi
	movl	20(%eax), %esi
	movl	24(%eax), %ebp
	movl	28(%eax), %esp
	# skip ss
	# skip eflags
	pop		%eax	# eax was already pushed on new stack
	ret				# eip was already pushed on new stack
	# skip cs
	# skip ds
	# skip es
	# skip fs
	# skip gs

#elif __x86_64__

	.text
	.globl __ZN9libunwind16Registers_x86_646jumptoEv
	.private_extern __ZN9libunwind16Registers_x86_646jumptoEv
__ZN9libunwind16Registers_x86_646jumptoEv:
#
# void libunwind::Registers_x86_64::jumpto()
#
# On entry, thread_state pointer is in rdi
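#
# Same staging trick as the i386 path: since rsp itself must be switched,
# the new rdi and rip are parked on the target stack first, then recovered
# by the final pop/ret.  Thread-state offsets as used below: 0 rax, 8 rbx,
# 16 rcx, 24 rdx, 32 rdi, 40 rsi, 48 rbp, 56 rsp, 64 r8 .. 120 r15,
# 128 rip.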

	movq	56(%rdi), %rax # rax holds new stack pointer
	subq	$16, %rax
	movq	%rax, 56(%rdi)
	movq	32(%rdi), %rbx	# store new rdi on new stack
	movq	%rbx, 0(%rax)
	movq	128(%rdi), %rbx # store new rip on new stack
	movq	%rbx, 8(%rax)
	# restore all registers
	movq	  0(%rdi), %rax
	movq	  8(%rdi), %rbx
	movq	 16(%rdi), %rcx
	movq	 24(%rdi), %rdx
	# restore rdi later
	movq	 40(%rdi), %rsi
	movq	 48(%rdi), %rbp
	# restore rsp later
	movq	 64(%rdi), %r8
	movq	 72(%rdi), %r9
	movq	 80(%rdi), %r10
	movq	 88(%rdi), %r11
	movq	 96(%rdi), %r12
	movq	104(%rdi), %r13
	movq	112(%rdi), %r14
	movq	120(%rdi), %r15
	# skip rflags
	# skip cs
	# skip fs
	# skip gs
	movq	56(%rdi), %rsp	# cut back rsp to new location
	pop		%rdi			# rdi was saved here earlier
	ret						# rip was saved here


#elif __ppc__

	.text
	.globl __ZN9libunwind13Registers_ppc6jumptoEv
	.private_extern __ZN9libunwind13Registers_ppc6jumptoEv
__ZN9libunwind13Registers_ppc6jumptoEv:
;
; void libunwind::Registers_ppc::jumpto()
;
; On entry:
;	thread_state pointer is in r3
;
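; ppc_thread_state offsets as used below: 0 srr0 (the resume pc), 8 r0,
; 12 r1 (sp), 16 r2 .. 132 r31, 136 cr, 148 ctr, 156 vrsave, then
; 160..408 f0-f31 and 424.. v0-v31.  The fields at 140/144/152
; (presumably xer/lr/mq) are not restored from the saved state.
;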

	; restore integral registers
	; skip r0 for now
	; skip r1 for now
	lwz		 r2, 16(r3)
	; skip r3 for now
	; skip r4 for now
	; skip r5 for now
	lwz		 r6, 32(r3)
	lwz		 r7, 36(r3)
	lwz		 r8, 40(r3)
	lwz		 r9, 44(r3)
	lwz		r10, 48(r3)
	lwz		r11, 52(r3)
	lwz		r12, 56(r3)
	lwz		r13, 60(r3)
	lwz		r14, 64(r3)
	lwz		r15, 68(r3)
	lwz		r16, 72(r3)
	lwz		r17, 76(r3)
	lwz		r18, 80(r3)
	lwz		r19, 84(r3)
	lwz		r20, 88(r3)
	lwz		r21, 92(r3)
	lwz		r22, 96(r3)
	lwz		r23,100(r3)
	lwz		r24,104(r3)
	lwz		r25,108(r3)
	lwz		r26,112(r3)
	lwz		r27,116(r3)
	lwz		r28,120(r3)
	lwz		r29,124(r3)
	lwz		r30,128(r3)
	lwz		r31,132(r3)

	; restore float registers
	lfd		f0, 160(r3)
	lfd		f1, 168(r3)
	lfd		f2, 176(r3)
	lfd		f3, 184(r3)
	lfd		f4, 192(r3)
	lfd		f5, 200(r3)
	lfd		f6, 208(r3)
	lfd		f7, 216(r3)
	lfd		f8, 224(r3)
	lfd		f9, 232(r3)
	lfd		f10,240(r3)
	lfd		f11,248(r3)
	lfd		f12,256(r3)
	lfd		f13,264(r3)
	lfd		f14,272(r3)
	lfd		f15,280(r3)
	lfd		f16,288(r3)
	lfd		f17,296(r3)
	lfd		f18,304(r3)
	lfd		f19,312(r3)
	lfd		f20,320(r3)
	lfd		f21,328(r3)
	lfd		f22,336(r3)
	lfd		f23,344(r3)
	lfd		f24,352(r3)
	lfd		f25,360(r3)
	lfd		f26,368(r3)
	lfd		f27,376(r3)
	lfd		f28,384(r3)
	lfd		f29,392(r3)
	lfd		f30,400(r3)
	lfd		f31,408(r3)

	; restore vector registers if any are in use
	lwz		r5, 156(r3)	; test VRsave
	cmpwi	r5, 0
	beq		Lnovec

	subi	r4, r1, 16
	rlwinm	r4, r4, 0, 0, 27	; clear the low 4 bits
	; r4 is now a 16-byte aligned pointer into the red zone
	; fVectorRegisters may not be 16-byte aligned, so copy each one
	; through this aligned red-zone temp buffer

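; VRsave dedicates bit n (counting from the most-significant bit) to vn,
; so v0-v15 are flagged in the upper halfword (tested with andis.) and
; v16-v31 in the lower halfword (tested with andi.); hence the two
; variants of the macro below.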
#define LOAD_VECTOR_UNALIGNEDl(_index) \
	andis.	r0,r5,(1<<(15-_index))	@\
	beq		Ldone  ## _index 		@\
	lwz		r0, 424+_index*16(r3)	@\
	stw		r0, 0(r4)				@\
	lwz		r0, 424+_index*16+4(r3)	@\
	stw		r0, 4(r4)				@\
	lwz		r0, 424+_index*16+8(r3)	@\
	stw		r0, 8(r4)				@\
	lwz		r0, 424+_index*16+12(r3)@\
	stw		r0, 12(r4)				@\
	lvx		v ## _index,0,r4		@\
Ldone  ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index) \
	andi.	r0,r5,(1<<(31-_index))	@\
	beq		Ldone  ## _index		@\
	lwz		r0, 424+_index*16(r3)	@\
	stw		r0, 0(r4)				@\
	lwz		r0, 424+_index*16+4(r3)	@\
	stw		r0, 4(r4)				@\
	lwz		r0, 424+_index*16+8(r3)	@\
	stw		r0, 8(r4)				@\
	lwz		r0, 424+_index*16+12(r3)@\
	stw		r0, 12(r4)				@\
	lvx		v ## _index,0,r4		@\
Ldone  ## _index:


	LOAD_VECTOR_UNALIGNEDl(0)
	LOAD_VECTOR_UNALIGNEDl(1)
	LOAD_VECTOR_UNALIGNEDl(2)
	LOAD_VECTOR_UNALIGNEDl(3)
	LOAD_VECTOR_UNALIGNEDl(4)
	LOAD_VECTOR_UNALIGNEDl(5)
	LOAD_VECTOR_UNALIGNEDl(6)
	LOAD_VECTOR_UNALIGNEDl(7)
	LOAD_VECTOR_UNALIGNEDl(8)
	LOAD_VECTOR_UNALIGNEDl(9)
	LOAD_VECTOR_UNALIGNEDl(10)
	LOAD_VECTOR_UNALIGNEDl(11)
	LOAD_VECTOR_UNALIGNEDl(12)
	LOAD_VECTOR_UNALIGNEDl(13)
	LOAD_VECTOR_UNALIGNEDl(14)
	LOAD_VECTOR_UNALIGNEDl(15)
	LOAD_VECTOR_UNALIGNEDh(16)
	LOAD_VECTOR_UNALIGNEDh(17)
	LOAD_VECTOR_UNALIGNEDh(18)
	LOAD_VECTOR_UNALIGNEDh(19)
	LOAD_VECTOR_UNALIGNEDh(20)
	LOAD_VECTOR_UNALIGNEDh(21)
	LOAD_VECTOR_UNALIGNEDh(22)
	LOAD_VECTOR_UNALIGNEDh(23)
	LOAD_VECTOR_UNALIGNEDh(24)
	LOAD_VECTOR_UNALIGNEDh(25)
	LOAD_VECTOR_UNALIGNEDh(26)
	LOAD_VECTOR_UNALIGNEDh(27)
	LOAD_VECTOR_UNALIGNEDh(28)
	LOAD_VECTOR_UNALIGNEDh(29)
	LOAD_VECTOR_UNALIGNEDh(30)
	LOAD_VECTOR_UNALIGNEDh(31)

Lnovec:
	lwz		r0, 136(r3)	; __cr
	mtcrf	255,r0
	lwz		r0, 148(r3)	; __ctr
	mtctr	r0
	lwz		r0, 0(r3)	; __srr0 (resume address)
	mtlr	r0			; branch via lr so the restored ctr survives
	lwz		r0, 8(r3)	; do r0 now
	lwz		r5, 28(r3)	; do r5 now
	lwz		r4, 24(r3)	; do r4 now
	lwz		r1, 12(r3)	; do sp now
	lwz		r3, 20(r3)	; do r3 last
	blr


#endif