//
// Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

  // The argument size of each inline directive is ignored by the compiler
  // and is set to the number of arguments as documentation.

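  // These templates follow the AMD64 C calling convention: the first
  // integer argument arrives in %rdi, the second in %rsi, the third in
  // %rdx, and the integer result is returned in %rax.
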
  // Get the raw thread ID from %fs:0 (on 64-bit the thread pointer lives
  // in %fs, not %gs as on 32-bit x86).
      .inline _raw_thread_id,0
      movq     %fs:0, %rax
      .end

  // Get the frame pointer of the current frame.
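  // (.volatile presumably marks the template as having side effects so the
  // optimizer will not elide it, since its result has no data dependence on
  // any input.)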
      .inline _get_current_fp,0
      .volatile
      movq     %rbp, %rax
      .end

  // Support for jint Atomic::add(jint add_value, volatile jint* dest)
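  // lock xaddl atomically stores add_value + *dest into *dest and leaves
  // the old *dest value in %edi; adding the saved add_value back yields
  // the new value, which is returned in %eax.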
      .inline _Atomic_add,2
      movl     %edi, %eax      // save add_value for return
      lock
      xaddl    %edi, (%rsi)
      addl     %edi, %eax
      .end

  // Support for jlong Atomic::add(jlong add_value, volatile jlong* dest)
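  // Same scheme as _Atomic_add above, on 64-bit operands.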
      .inline _Atomic_add_long,2
      movq     %rdi, %rax      // save add_value for return
      lock
      xaddq    %rdi, (%rsi)
      addq     %rdi, %rax
      .end

  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
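  // xchg with a memory operand is implicitly locked, so no lock prefix is
  // needed; the old *dest value lands in %edi and is moved to %eax for the
  // return.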
      .inline _Atomic_xchg,2
      xchgl    (%rsi), %edi
      movl     %edi, %eax
      .end

  // Support for jlong Atomic::xchg(jlong exchange_value, volatile jlong* dest)
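  // Same scheme as _Atomic_xchg above, on 64-bit operands.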
      .inline _Atomic_xchg_long,2
      xchgq    (%rsi), %rdi
      movq     %rdi, %rax
      .end

  // Support for jint Atomic::cmpxchg(jint exchange_value,
  //                                  volatile jint* dest,
  //                                  jint compare_value)
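  // cmpxchgl compares %eax (compare_value) with *dest: on a match it stores
  // exchange_value into *dest, otherwise it loads *dest into %eax. Either
  // way %eax ends up holding the previous contents of *dest, which is the
  // value returned.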
      .inline _Atomic_cmpxchg,3
      movl     %edx, %eax      // compare_value
      lock
      cmpxchgl %edi, (%rsi)
      .end

  // Support for jlong Atomic::cmpxchg(jlong exchange_value,
  //                                   volatile jlong* dest,
  //                                   jlong compare_value)
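  // Same scheme as _Atomic_cmpxchg above, on 64-bit operands.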
      .inline _Atomic_cmpxchg_long,3
      movq     %rdx, %rax      // compare_value
      lock
      cmpxchgq %rdi, (%rsi)
      .end

  // Support for OrderAccess::acquire()
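  // A dummy load suffices: x86 does not reorder loads with earlier loads,
  // so reading the top of the stack acts as an acquire barrier.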
      .inline _OrderAccess_acquire,0
      movl     0(%rsp), %eax
      .end

  // Support for OrderAccess::fence()
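  // A locked read-modify-write of the stack top forces prior stores to
  // become globally visible and serializes memory accesses; this is the
  // classic full-fence idiom, typically cheaper than mfence.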
      .inline _OrderAccess_fence,0
      lock
      addl     $0, (%rsp)
      .end

  // Support for u2 Bytes::swap_u2(u2 x)
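  // Rotating the 16-bit value by 8 bits swaps its two bytes.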
      .inline _raw_swap_u2,1
      movw     %di, %ax
      rorw     $8, %ax
      .end

  // Support for u4 Bytes::swap_u4(u4 x)
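  // bswap reverses the byte order of the register in place; the u8 variant
  // below does the same on all eight bytes.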
      .inline _raw_swap_u4,1
      movl     %edi, %eax
      bswapl   %eax
      .end

  // Support for u8 Bytes::swap_u8(u8 x)
      .inline _raw_swap_u8,1
      movq     %rdi, %rax
      bswapq   %rax
      .end

  // Support for void Prefetch::read(void* loc, intx interval)
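  // The two arguments are a base address and a byte offset; (%rdi,%rsi,1)
  // prefetches the cache line at base + offset into all cache levels (T0).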
      .inline _Prefetch_read,2
      prefetcht0 (%rdi, %rsi, 1)
      .end

  // Support for void Prefetch::write(void* loc, intx interval)
  // We use prefetcht0 because EM64T does not support prefetchw;
  // prefetchw is a 3DNow! instruction.
      .inline _Prefetch_write,2
      prefetcht0 (%rdi, %rsi, 1)
      .end
125