orderAccess_linux_x86.inline.hpp revision 4995:4614a598dae1
/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "vm_version_x86.hpp"

// Implementation of class OrderAccess.
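//
// x86 has a comparatively strong memory model: ordinary loads are not
// reordered with other loads, ordinary stores are not reordered with other
// stores, and stores are not reordered with older loads.  Only the
// store-followed-by-load case can be reordered by the hardware, so loadload,
// storestore and loadstore need nothing more than a compiler barrier
// (acquire()/release() below), while storeload requires a real serializing
// instruction (fence()).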

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

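// acquire() only has to stop the compiler from floating later memory accesses
// above this point; the dummy volatile load from the stack plus the "memory"
// clobber provides that, and the hardware already gives x86 loads acquire
// semantics.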
inline void OrderAccess::acquire() {
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

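// release() only needs to keep the compiler from reordering accesses across
// it; no hardware barrier is required because x86 does not reorder a store
// with earlier loads or stores.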
inline void OrderAccess::release() {
  // Use a stack-local dummy rather than a shared static so that different
  // threads do not hit the same cache line.
  volatile jint local_dummy = 0;
}

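// fence() must provide the full two-way (StoreLoad) barrier.  It is only
// needed when other processors can observe the ordering, hence the
// os::is_MP() check.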
inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}

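// Plain loads and stores already have acquire/release semantics on x86, so
// load_acquire and release_store are ordinary volatile accesses.  The
// jlong/julong/jdouble variants go through Atomic::load/store so that the
// 64-bit access remains atomic on 32-bit platforms.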
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }

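// store_fence is a single xchg: an xchg that references memory carries an
// implicit lock prefix, so the one instruction both performs the store and
// acts as a full memory barrier.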
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

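// xchgq is only available in 64-bit mode, so on 32-bit x86 the jlong variant
// falls back to a plain store followed by fence().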
inline void     OrderAccess::store_fence(jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

// The AMD64 port originally copied the bodies of the signed versions here; the
// 32-bit port simply delegated to them instead.  As long as the compiler does
// the inlining, delegating is the simpler approach.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { store_fence((jlong*)p, jlong_cast(v)); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store(p, v); fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }

inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}

#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP