orderAccess_linux_x86.inline.hpp revision 844:bd02caa94611
/*
 * Copyright 2003-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Implementation of class OrderAccess.
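
// x86 is processor-ordered (TSO): loads are not reordered with other
// loads, stores are not reordered with other stores, and stores are not
// reordered with older loads.  The one reordering the hardware does
// allow, a younger load passing an older store to a different location,
// is why storeload() must issue a real fence(); the other three
// barriers only have to restrain the compiler.
//
// The classic pattern that needs storeload() is Dekker-style
// synchronization (a sketch only; the _interested flags are
// hypothetical, not part of this file):
//
//   _interested[me] = 1;         // announce intent (store)
//   OrderAccess::storeload();    // the store must not pass the load below
//   if (_interested[other]) { /* back off */ }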

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }
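
// Acquire is implemented as a dummy load from the top of the stack:
// every x86 load already has acquire semantics, so the inline asm below
// acts chiefly as a compiler barrier (via the "memory" clobber) while
// reading a location that is always mapped.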
inline void OrderAccess::acquire() {
  volatile intptr_t dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (dummy) : : "memory");
#endif // AMD64
}

inline void OrderAccess::release() {
  // x86 stores already have release semantics, so only a compiler
  // barrier is needed.  Store to a local dummy rather than a shared
  // static so that threads never contend on the same cache line here.
  volatile jint local_dummy = 0;
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}
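
// A minimal sketch of how the acquire/release pairs below are meant to
// be used (the _payload and _ready fields are hypothetical, for
// illustration only):
//
//   // publisher
//   _payload = compute();                      // plain store
//   OrderAccess::release_store(&_ready, 1);    // publish the flag
//
//   // consumer
//   if (OrderAccess::load_acquire(&_ready)) {  // observe the flag
//     use(_payload);                           // _payload is now visible
//   }
//
// On x86 a plain volatile load is enough for load_acquire: the hardware
// gives every load acquire semantics, and volatile keeps the compiler
// from reordering or caching the read.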
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
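
// release_store is likewise a plain volatile store: x86 stores already
// have release semantics, so no hardware barrier is needed, only the
// compiler constraint that volatile provides.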
inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
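
// store_fence uses xchg: with a memory operand, xchg carries an
// implicit lock prefix, so a single instruction performs both the store
// and a full fence.  32-bit x86 has no 8-byte xchg, which is why the
// jlong variant falls back to a plain store followed by fence().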
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::store_fence(jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

// The unsigned versions simply delegate to the signed ones, as the
// 32-bit port always did (AMD64 used to copy the bodies).  As long as
// the compiler does the inlining, this is simpler.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }
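
// Pointer-sized variants: on AMD64 a pointer is 8 bytes, so xchgq is
// used directly; on 32-bit, intptr_t and void* are 4 bytes, so the jint
// store_fence above can be reused via casts.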
inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we
// don't want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }
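
// There is no xchg variant for jfloat/jdouble here, presumably because
// the value would first have to be moved out of a floating-point
// register; instead these fall back to a volatile store followed by a
// full fence().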
inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}