solaris_x86_32.il revision 0:a61af66fc99e
//
// Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
// CA 95054 USA or visit www.sun.com if you need additional information or
// have any questions.
//
//

  // Support for void os::setup_fpu()
      .inline _solaris_raw_setup_fpu,1
      movl     0(%esp), %eax
      fldcw    (%eax)
      .end

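  // A hedged caller-side sketch (illustrative only; the declaration and the control
  // word value are assumptions, not taken from this file): the inline receives the
  // address of a 16-bit x87 control word, which fldcw loads into the FPU.
  //   extern "C" void _solaris_raw_setup_fpu(unsigned short* fpu_control_word);
  //   unsigned short cw = 0x027F;   // e.g. round-to-nearest, 53-bit precision, exceptions masked
  //   _solaris_raw_setup_fpu(&cw);
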
  // The argument size of each inline directive is ignored by the compiler
  // and is kept for compatibility reasons only.

  // Get the raw thread ID from %gs:0
      .inline _raw_thread_id,0
      movl     %gs:0, %eax
      .end

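  // Background note (interpretation, not part of the original file): on 32-bit
  // Solaris/x86 the %gs segment addresses the current thread's LWP structure, whose
  // first word is a self-pointer, so this load yields a cheap per-thread identifier.
  // On the C++ side it is presumably declared along these lines (assumed name/type):
  //   extern "C" int _raw_thread_id();
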
  // Get caller's fp
      .inline _get_previous_fp,0
      movl     %ebp, %eax
      movl     %eax, %eax
      .end

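  // Because the inline expands directly at the call site, %ebp here is the frame
  // pointer of the calling function. A hedged caller-side sketch (illustrative
  // names and declaration, not taken from this file):
  //   extern "C" void** _get_previous_fp();
  //   void** fp        = _get_previous_fp();  // caller's %ebp
  //   void*  saved_ebp = fp[0];               // previous frame pointer
  //   void*  return_pc = fp[1];               // caller's return address
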
  // Support for jint Atomic::add(jint inc, volatile jint* dest)
  // An additional bool (os::is_MP()) is passed as the last argument.
      .inline _Atomic_add,3
      movl     0(%esp), %eax   // inc
      movl     4(%esp), %edx   // dest
      movl     %eax, %ecx
      cmpl     $0, 8(%esp)     // MP test
      je       1f
      lock
1:    xaddl    %eax, (%edx)
      addl     %ecx, %eax
      .end

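  // How the sequence yields Atomic::add's return value (interpretation, expressed
  // as a hedged C-level sketch rather than the actual HotSpot source): xaddl leaves
  // the *old* contents of *dest in %eax, so adding the saved increment (%ecx)
  // produces the updated value.
  //   jint atomic_add_sketch(jint inc, volatile jint* dest) {
  //     jint old = *dest;        // value xaddl returns in %eax
  //     *dest = old + inc;       // performed atomically by (lock) xaddl
  //     return old + inc;        // addl %ecx, %eax
  //   }
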
  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
      .inline _Atomic_xchg,2
      movl     0(%esp), %eax   // exchange_value
      movl     4(%esp), %ecx   // dest
      xchgl    (%ecx), %eax
      .end

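  // Note (interpretation, not part of the original file): xchgl with a memory
  // operand asserts LOCK# implicitly, which is why no os::is_MP() test is needed
  // here. Hedged C-level sketch of the semantics:
  //   jint atomic_xchg_sketch(jint exchange_value, volatile jint* dest) {
  //     jint old = *dest;        // previous value, returned in %eax
  //     *dest = exchange_value;  // store performed atomically by xchgl
  //     return old;
  //   }
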
  // Support for jint Atomic::cmpxchg(jint exchange_value,
  //                                  volatile jint* dest,
  //                                  jint compare_value)
  // An additional bool (os::is_MP()) is passed as the last argument.
      .inline _Atomic_cmpxchg,4
      movl     8(%esp), %eax   // compare_value
      movl     0(%esp), %ecx   // exchange_value
      movl     4(%esp), %edx   // dest
      cmp      $0, 12(%esp)    // MP test
      je       1f
      lock
1:    cmpxchgl %ecx, (%edx)
      .end

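  // cmpxchgl compares %eax (compare_value) with *dest; on a match it stores %ecx
  // (exchange_value), otherwise it loads the current memory value into %eax. Either
  // way %eax ends up holding the previous contents of *dest, which is what
  // Atomic::cmpxchg returns. Hedged C-level sketch (not the actual HotSpot source):
  //   jint atomic_cmpxchg_sketch(jint exchange_value, volatile jint* dest,
  //                              jint compare_value) {
  //     jint old = *dest;                                   // returned in %eax
  //     if (old == compare_value) *dest = exchange_value;   // done atomically
  //     return old;
  //   }
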
  // Support for jlong Atomic::cmpxchg(jlong exchange_value,
  //                                   volatile jlong* dest,
  //                                   jlong compare_value)
  // An additional bool (os::is_MP()) is passed as the last argument.
      .inline _Atomic_cmpxchg_long,6
      pushl    %ebx
      pushl    %edi
      // the two pushes above shift every argument offset up by 8
      movl     20(%esp), %eax  // compare_value (low)
      movl     24(%esp), %edx  // compare_value (high)
      movl     16(%esp), %edi  // dest
      movl     8(%esp), %ebx   // exchange_value (low)
      movl     12(%esp), %ecx  // exchange_value (high)
      cmp      $0, 28(%esp)    // MP test
      je       1f
      lock
1:    cmpxchg8b (%edi)
      popl     %edi
      popl     %ebx
      .end

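  // cmpxchg8b compares %edx:%eax (compare_value) with the eight bytes at (%edi);
  // on a match it stores %ecx:%ebx (exchange_value), otherwise it loads the memory
  // value into %edx:%eax. The result is therefore already in %edx:%eax, the 32-bit
  // convention for returning a jlong. %ebx and %edi are callee-saved, hence the
  // push/pop pairs. Hedged C-level sketch of the semantics (not the HotSpot source):
  //   jlong atomic_cmpxchg_long_sketch(jlong exchange_value, volatile jlong* dest,
  //                                    jlong compare_value) {
  //     jlong old = *dest;                                  // returned in %edx:%eax
  //     if (old == compare_value) *dest = exchange_value;   // done atomically
  //     return old;
  //   }
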
  // Support for OrderAccess::acquire()
      .inline _OrderAccess_acquire,0
      movl     0(%esp), %eax
      .end

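  // Note (interpretation, not part of the original file): on IA-32 ordinary loads
  // already have acquire semantics, so this dummy load of the stack top suffices;
  // routing it through an .inline mainly keeps the compiler from reordering memory
  // accesses across the acquire point.
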
  // Support for OrderAccess::fence()
      .inline _OrderAccess_fence,0
      lock
      addl     $0, (%esp)
      .end

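  // Note (interpretation, not part of the original file): "lock addl $0, (%esp)"
  // is a locked no-op on the top of the stack; a locked read-modify-write has full
  // fence semantics on x86, so it serves as the StoreLoad barrier that fence()
  // needs and works on IA-32 CPUs that lack mfence (pre-SSE2).
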
  // Support for u2 Bytes::swap_u2(u2 x)
      .inline _raw_swap_u2,1
      movl     0(%esp), %eax
      xchgb    %al, %ah
      .end

  // Support for u4 Bytes::swap_u4(u4 x)
      .inline _raw_swap_u4,1
      movl     0(%esp), %eax
      bswap    %eax
      .end

  // Support for u8 Bytes::swap_u8_base(u4 x, u4 y)
      .inline _raw_swap_u8,2
      movl     4(%esp), %eax   // y
      movl     0(%esp), %edx   // x
      bswap    %eax
      bswap    %edx
      .end

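  // The jlong result comes back in %edx:%eax: the low half is bswap(y) and the
  // high half is bswap(x), i.e. each 32-bit half is byte-swapped and the halves
  // are exchanged, which amounts to a full 64-bit byte swap. Hedged C-level sketch
  // (illustrative helper; assumes the caller passes x as the low half and y as the
  // high half of the original u8, which is not stated in this file):
  //   static u4 bswap32(u4 v) {
  //     return (v >> 24) | ((v >> 8) & 0x0000ff00) | ((v << 8) & 0x00ff0000) | (v << 24);
  //   }
  //   u8 swap_u8_base_sketch(u4 x, u4 y) {
  //     return ((u8) bswap32(x) << 32) | bswap32(y);   // high = bswap(x), low = bswap(y)
  //   }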