// solaris_sparc.il, revision 5776:de6a9e811145
//
// Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

  // Get the raw thread ID from %g7

       .inline  _raw_thread_id, 0
       .register %g7,#scratch
       .volatile
       mov     %g7, %o0
       .nonvolatile
       .end


  // Clear the SPARC fprs.FEF, DU, and DL bits --
  // this allows the kernel to avoid saving FPU state at context-switch time.
  // Use at state-transition points (into _thread_blocked) or when
  // parking.

       .inline _mark_fpu_nosave, 0
       .volatile
       wr   %g0, 0, %fprs
       .nonvolatile
       .end
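
  // (In the SPARC V9 %fprs register, FEF is bit 2 and the dirty bits DU and
  // DL are bits 1 and 0, so writing zero clears all three in one instruction.)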

  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
  //
  // Arguments:
  //      exchange_value: O0
  //      dest:           O1
  //
  // Results:
  //     O0: the value previously stored in dest

        .inline _Atomic_swap32, 2
        .volatile
        swap    [%o1],%o0
        .nonvolatile
        .end
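
  // Illustrative C-style sketch of the template above (not taken from the
  // HotSpot sources); the swap instruction does the exchange as one atomic
  // 32-bit operation:
  //
  //   jint old = *dest;
  //   *dest    = exchange_value;
  //   return old;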


  // Support for intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t * dest).
  //
  // 64-bit
  //
  // Arguments:
  //      exchange_value: O0
  //      dest:           O1
  //
  // Results:
  //     O0: the value previously stored in dest

        .inline _Atomic_swap64, 2
        .volatile
    1:
        mov     %o0, %o3
        ldx     [%o1], %o2
        casx    [%o1], %o2, %o3
        cmp     %o2, %o3
        bne     %xcc, 1b
         nop
        mov     %o2, %o0
        .nonvolatile
        .end
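
  // Illustrative C-style sketch of the retry loop above (not taken from the
  // HotSpot sources), writing casx(p, cmp, new) for the hardware
  // compare-and-swap that returns the previous contents of *p:
  //
  //   intptr_t old;
  //   do {
  //     old = *dest;
  //   } while (casx(dest, old, exchange_value) != old);
  //   return old;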


  // Support for jint Atomic::cmpxchg(jint           exchange_value,
  //                                  volatile jint* dest,
  //                                  jint           compare_value)
  //
  // Arguments:
  //      exchange_value: O0
  //      dest:           O1
  //      compare_value:  O2
  //
  // Results:
  //     O0: the value previously stored in dest

        .inline _Atomic_cas32, 3
        .volatile
        cas     [%o1], %o2, %o0
        .nonvolatile
        .end
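
  // Illustrative C-style sketch of the cas above (not taken from the HotSpot
  // sources); _Atomic_cas64 below is the same operation on 64-bit values:
  //
  //   jint old = *dest;
  //   if (old == compare_value)
  //     *dest = exchange_value;    // performed as one atomic step
  //   return old;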


  // Support for intptr_t Atomic::cmpxchg_ptr(intptr_t           exchange_value,
  //                                          volatile intptr_t* dest,
  //                                          intptr_t           compare_value)
  //
  // 64-bit
  //
  // Arguments:
  //      exchange_value: O0
  //      dest:           O1
  //      compare_value:  O2
  //
  // Results:
  //     O0: the value previously stored in dest

        .inline _Atomic_cas64, 3
        .volatile
        casx    [%o1], %o2, %o0
        .nonvolatile
        .end


  // Support for jlong Atomic::cmpxchg(jlong           exchange_value,
  //                                   volatile jlong* dest,
  //                                   jlong           compare_value)
  //
  // 32-bit calling conventions
  //
  // Arguments:
  //      exchange_value: O1:O0
  //      dest:           O2
  //      compare_value:  O4:O3
  //
  // Results:
  //     O1:O0: the value previously stored in dest

        .inline _Atomic_casl, 3
        .volatile
        sllx    %o0, 32, %o0
        srl     %o1, 0, %o1
        or      %o0,%o1,%o0
        sllx    %o3, 32, %o3
        srl     %o4, 0, %o4
        or      %o3,%o4,%o3
        casx    [%o2], %o3, %o0
        srl     %o0, 0, %o1
        srlx    %o0, 32, %o0
        .nonvolatile
        .end
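
  // Illustrative C-style sketch of the packing above (not taken from the
  // HotSpot sources): O0/O3 carry the high 32 bits and O1/O4 the low 32 bits,
  // so the template assembles full 64-bit words, issues one casx, and then
  // splits the previous value back into a register pair:
  //
  //   uint64_t xchg = ((uint64_t)o0 << 32) | (uint32_t)o1;  // exchange_value
  //   uint64_t cmp  = ((uint64_t)o3 << 32) | (uint32_t)o4;  // compare_value
  //   uint64_t old  = casx(dest, cmp, xchg);
  //   o1 = (uint32_t)old;          // low half of the previous value
  //   o0 = (uint32_t)(old >> 32);  // high half of the previous value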

  // Support for jlong Atomic::load and Atomic::store on v9.
  //
  // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
  //
  // Arguments:
  //      src:  O0
  //      dest: O1
  //
  // Overwrites O2

        .inline _Atomic_move_long_v9,2
        .volatile
        ldx     [%o0], %o2
        stx     %o2, [%o1]
        .nonvolatile
        .end
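
  // The single ldx/stx pair above moves the jlong with one 64-bit load and
  // one 64-bit store, so no reader can observe a half-written value.
  // Illustrative C-style sketch (not taken from the HotSpot sources):
  //
  //   *dst = *src;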

  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
  //
  // Arguments:
  //      add_value: O0   (e.g., +1 or -1)
  //      dest:      O1
  //
  // Results:
  //     O0: the new value stored in dest
  //
  // Overwrites O3

        .inline _Atomic_add32, 2
        .volatile
    2:
        ld      [%o1], %o2
        add     %o0, %o2, %o3
        cas     [%o1], %o2, %o3
        cmp     %o2, %o3
        bne     2b
         nop
        add     %o0, %o2, %o0
        .nonvolatile
        .end
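
  // Illustrative C-style sketch of the add loop above (not taken from the
  // HotSpot sources); _Atomic_add64 below is the same pattern using ldx/casx,
  // and cas32(p, cmp, new) stands for the cas that returns the old value:
  //
  //   jint old, updated;
  //   do {
  //     old     = *dest;
  //     updated = old + add_value;
  //   } while (cas32(dest, old, updated) != old);
  //   return updated;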


  // Support for intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
  //
  // 64-bit
  //
  // Arguments:
  //      add_value: O0   (e.g., +1 or -1)
  //      dest:      O1
  //
  // Results:
  //     O0: the new value stored in dest
  //
  // Overwrites O3

        .inline _Atomic_add64, 2
        .volatile
    3:
        ldx     [%o1], %o2
        add     %o0, %o2, %o3
        casx    [%o1], %o2, %o3
        cmp     %o2, %o3
        bne     %xcc, 3b
         nop
        add     %o0, %o2, %o0
        .nonvolatile
        .end


  // Support for void OrderAccess::acquire()
  // The method is intentionally empty.
  // It exists for the sole purpose of generating
  // a C/C++ sequence point over which the compiler won't
  // reorder code.

        .inline _OrderAccess_acquire,0
        .volatile
        .nonvolatile
        .end


  // Support for void OrderAccess::fence()

        .inline _OrderAccess_fence,0
        .volatile
        membar  #StoreLoad
        .nonvolatile
        .end
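
  // (Assumption, not stated in this file: the SPARC processors targeted here
  // run in TSO mode, where store-then-load is the only reordering the
  // hardware performs, so a single membar #StoreLoad suffices for a full
  // fence and acquire needs only the compiler barrier above.)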


  // Support for void Prefetch::read(void *loc, intx interval)
  //
  // Prefetch for several reads.

        .inline _Prefetch_read, 2
        .volatile
        prefetch [%o0+%o1], 0
        .nonvolatile
        .end


  // Support for void Prefetch::write(void *loc, intx interval)
  //
  // Prefetch for several writes.

        .inline _Prefetch_write, 2
        .volatile
        prefetch [%o0+%o1], 2
        .nonvolatile
        .end
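
  // In both templates the second prefetch operand is the SPARC V9 prefetch
  // function code (0 = prefetch for several reads, 2 = prefetch for several
  // writes), and the address prefetched is loc + interval.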


  // Support for void Copy::conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count)
  //
  // 32-bit
  //
  // Arguments:
  //      from:  O0
  //      to:    O1
  //      count: O2 treated as signed
  //
  // Clobbers:
  //      long_value: O2, O3
  //      count:      O4
  //
  // if (from > to) {
  //   while (--count >= 0) {
  //     *to++ = *from++;
  //   }
  // } else {
  //   while (--count >= 0) {
  //     to[count] = from[count];
  //   }
  // }
        .inline _Copy_conjoint_jlongs_atomic, 3
        .volatile
        cmp     %o0, %o1
        bleu    4f
        sll     %o2, 3, %o4
        ba      2f
    1:
        subcc   %o4, 8, %o4
        std     %o2, [%o1]
        add     %o0, 8, %o0
        add     %o1, 8, %o1
    2:
        bge,a   1b
        ldd     [%o0], %o2
        ba      5f
        nop
    3:
        std     %o2, [%o1+%o4]
    4:
        subcc   %o4, 8, %o4
        bge,a   3b
        ldd     [%o0+%o4], %o2
    5:
        .nonvolatile
        .end

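
  // The direction test above keeps the conjoint copy safe for overlapping
  // ranges: when from > to the loop walks upward, otherwise it walks downward
  // from the end, so no slot is read after it has been overwritten.  Each
  // element moves through the %o2:%o3 pair with a single 8-byte ldd/std,
  // which is what keeps every jlong access atomic in the 32-bit VM.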