/*
 * Copyright 2003 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

//                Memory Access Ordering Model
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers
// and on the IA64 memory model.  It is the dynamic equivalent of the
// C/C++ volatile specifier.  I.e., volatility restricts compile-time
// memory access reordering in a way similar to what we want to occur
// at runtime.
//
// In the following, the terms 'previous', 'subsequent', 'before',
// 'after', 'preceding' and 'succeeding' refer to program order.  The
// terms 'down' and 'below' refer to forward load or store motion
// relative to program order, while 'up' and 'above' refer to backward
// motion.
//
//
// We define four primitive memory barrier operations.
//
// LoadLoad:   Load1(s); LoadLoad; Load2
//
// Ensures that Load1 completes (obtains the value it loads from memory)
// before Load2 and any subsequent load operations.  Loads before Load1
// may *not* float below Load2 and any subsequent load operations.
//
// StoreStore: Store1(s); StoreStore; Store2
//
// Ensures that Store1 completes (the effect on memory of Store1 is made
// visible to other processors) before Store2 and any subsequent store
// operations.  Stores before Store1 may *not* float below Store2 and any
// subsequent store operations.
//
// LoadStore:  Load1(s); LoadStore; Store2
//
// Ensures that Load1 completes before Store2 and any subsequent store
// operations.  Loads before Load1 may *not* float below Store2 and any
// subsequent store operations.
//
// StoreLoad:  Store1(s); StoreLoad; Load2
//
// Ensures that Store1 completes before Load2 and any subsequent load
// operations.  Stores before Store1 may *not* float below Load2 and any
// subsequent load operations.
//
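// As an illustrative sketch (hypothetical; the names 'data' and 'flag'
// are made up for this example), a producer and a reader might use the
// first two primitives directly:
//
//   // producer                        // reader
//   data = 42;                         while (flag == 0) ;
//   OrderAccess::storestore();         OrderAccess::loadload();
//   flag = 1;                          r = data;
//
// The storestore() keeps the store to 'data' from floating below the
// store to 'flag'; the loadload() keeps the load of 'data' from
// floating above the load of 'flag'.
//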
//
// We define two further operations, 'release' and 'acquire'.  They are
// mirror images of each other.
//
// Execution by a processor of release makes the effect of all memory
// accesses issued by it previous to the release visible to all
// processors *before* the release completes.  The effect of subsequent
// memory accesses issued by it *may* be made visible *before* the
// release.  I.e., subsequent memory accesses may float above the
// release, but prior ones may not float below it.
//
// Execution by a processor of acquire makes the effect of all memory
// accesses issued by it subsequent to the acquire visible to all
// processors *after* the acquire completes.  The effect of prior memory
// accesses issued by it *may* be made visible *after* the acquire.
// I.e., prior memory accesses may float below the acquire, but
// subsequent ones may not float above it.
//
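// The same publication idiom can be written with the one-sided
// operations (again a hypothetical sketch with made-up names):
//
//   // publisher                       // consumer
//   payload = compute();               while (!ready) ;
//   OrderAccess::release();            OrderAccess::acquire();
//   ready = true;                      use(payload);
//
// The release() keeps the store to 'payload' from floating below the
// store to 'ready'; the acquire() keeps the load of 'payload' from
// floating above the load of 'ready'.
//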
// Finally, we define a 'fence' operation, which conceptually is a
// release combined with an acquire.  In the real world these operations
// require one or more machine instructions which can float above and
// below the release or acquire, so we usually can't just issue the
// release-acquire back-to-back.  All machines we know of implement some
// sort of memory fence instruction.
//
//
// The standalone implementations of release and acquire need an associated
// dummy volatile store or load respectively.  To avoid redundant operations,
// we can define the composite operators: 'release_store', 'store_fence' and
// 'load_acquire'.  Here's a summary of the machine instructions corresponding
// to each operation.
//
//               sparc RMO             ia64             x86
// ---------------------------------------------------------------------
// fence         membar #LoadStore |   mf               lock addl 0,(sp)
//                      #StoreStore |
//                      #LoadLoad |
//                      #StoreLoad
//
// release       membar #LoadStore |   st.rel [sp]=r0   movl $0,<dummy>
//                      #StoreStore
//               st %g0,[]
//
// acquire       ld [%sp],%g0          ld.acq <r>=[sp]  movl (sp),<r>
//               membar #LoadLoad |
//                      #LoadStore
//
// release_store membar #LoadStore |   st.rel           <store>
//                      #StoreStore
//               st
//
// store_fence   st                    st               lock xchg
//               fence                 mf
//
// load_acquire  ld                    ld.acq           <load>
//               membar #LoadLoad |
//                      #LoadStore
//
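// As a hypothetical sketch (not necessarily how any particular port is
// written), on a TSO machine such as x86 the standalone operations could
// be implemented with just the dummy volatile accesses named above,
// relying on the hardware's ordering and using the volatile access as a
// compiler sequence point:
//
//   inline void OrderAccess::release() {
//     dummy = 0;                       // dummy volatile store
//   }
//
//   inline void OrderAccess::acquire() {
//     intptr_t t = dummy;              // dummy volatile load
//   }
//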
// Using only release_store and load_acquire, we can implement the
// following ordered sequences.
//
// 1. load, load   == load_acquire,  load
//                 or load_acquire,  load_acquire
// 2. load, store  == load,          release_store
//                 or load_acquire,  store
//                 or load_acquire,  release_store
// 3. store, store == store,         release_store
//                 or release_store, release_store
//
// These require no membar instructions for sparc-TSO and no extra
// instructions for ia64.
//
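// For instance, sequence 3 is the initialize-then-publish idiom (a
// hypothetical sketch; '_field' and '_flag' are made-up names):
//
//   obj->_field = value;                     // store
//   OrderAccess::release_store(&_flag, 1);   // release_store
//
// On sparc-TSO this is just two plain stores; on ia64 the second store
// becomes st.rel.
//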
// Ordering a load relative to preceding stores requires a store_fence,
// which implies a membar #StoreLoad between the store and load under
// sparc-TSO.  A fence is required by ia64.  On x86, we use locked xchg.
//
// 4. store, load  == store_fence, load
//
// Use store_fence to make sure all stores done in an 'interesting'
// region are made visible prior to both subsequent loads and stores.
//
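// As a hypothetical sketch, the classic two-flag handshake (store your
// own flag, then load the other thread's) needs exactly this
// store-to-load ordering; '_self' and '_other' are made-up names:
//
//   OrderAccess::store_fence(&_self, 1);   // store, then barrier
//   if (_other == 0) {
//     // the load of _other may not float above the store to _self
//   }
//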
// Conventional usage is to issue a load_acquire for ordered loads.  Use
// release_store for ordered stores when you care only that prior stores
// are visible before the release_store, but don't care exactly when the
// store associated with the release_store becomes visible.  Use
// release_store_fence to update values like the thread state, where we
// don't want the current thread to continue until all our prior memory
// accesses (including the new thread state) are visible to other threads.
//
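// For example, a thread-state transition (a hypothetical sketch; the
// field and constant names are only illustrative) would be written as:
//
//   OrderAccess::release_store_fence(&_thread_state, _thread_in_vm);
//
// so that all prior accesses, including the new state itself, are
// visible to other threads before this thread continues.
//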
//
//                C++ Volatility
//
// C++ guarantees ordering at operations termed 'sequence points' (defined
// to be volatile accesses and calls to library I/O functions).  'Side
// effects' (defined as volatile accesses, calls to library I/O functions
// and object modification) previous to a sequence point must be visible
// at that sequence point.  See the C++ standard, section 1.9, titled
// "Program Execution".  This means that all barrier implementations,
// including standalone loadload, storestore, loadstore, storeload, acquire
// and release must include a sequence point, usually via a volatile memory
// access.  Other ways to guarantee a sequence point are, e.g., use of
// indirect calls and linux's __asm__ volatile.
//
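// For instance, on gcc/linux a compiler-only sequence point can be
// expressed with the well-known empty-asm idiom (shown as a sketch):
//
//   __asm__ volatile ("" : : : "memory");
//
// This emits no instruction, but the "memory" clobber forbids the
// compiler from moving or caching memory accesses across it.
//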
//
//                os::is_MP Considered Redundant
//
// Callers of this interface do not need to test os::is_MP() before
// issuing an operation.  The test is taken care of by the implementation
// of the interface (depending on the vm version and platform, the test
// may or may not actually be done by the implementation).
//
//
//                A Note on Memory Ordering and Cache Coherency
//
// Cache coherency and memory ordering are orthogonal concepts, though they
// interact.  E.g., all existing itanium machines are cache-coherent, but
// the hardware can freely reorder loads wrt other loads unless it sees a
// load-acquire instruction.  All existing sparc machines are cache-coherent
// and, unlike itanium, TSO guarantees that the hardware orders loads wrt
// loads and stores, and stores wrt each other.
//
// Consider the implementation of loadload.  *If* your platform *isn't*
// cache-coherent, then loadload must not only prevent hardware load
// instruction reordering, but it must *also* ensure that subsequent
// loads from addresses that could be written by other processors (i.e.,
// that are broadcast by other processors) go all the way to the first
// level of memory shared by those processors and the one issuing
// the loadload.
//
// So if we have an MP that has, say, a per-processor D$ that doesn't see
// writes by other processors, and has a shared E$ that does, the loadload
// barrier would have to make sure that either
//
// 1. cache lines in the issuing processor's D$ that contained data from
// addresses that could be written by other processors are invalidated, so
// subsequent loads from those addresses go to the E$ (it could do this
// by tagging such cache lines as 'shared', though how to tell the hardware
// to do the tagging is an interesting problem), or
//
// 2. there never are such cache lines in the issuing processor's D$, which
// means all references to shared data (however identified: see above)
// bypass the D$ (i.e., are satisfied from the E$).
//
// If your machine doesn't have an E$, substitute 'main memory' for 'E$'.
//
// Either of these alternatives is a pain, so no current machine we know of
// has incoherent caches.
//
// If loadload didn't have these properties, the store-release sequence for
// publishing a shared data structure wouldn't work, because a processor
// trying to read data newly published by another processor might go to
// its own incoherent caches to satisfy the read instead of to the newly
// written shared memory.
//
//
//                NOTE WELL!!
//
//                A Note on MutexLocker and Friends
//
// See mutexLocker.hpp.  We assume throughout the VM that MutexLocker's
// and friends' constructors do a fence, a lock and an acquire *in that
// order*.  And that their destructors do a release and unlock, in *that*
// order.  If their implementations change such that these assumptions
// are violated, a whole lot of code will break.

class OrderAccess : AllStatic {
 public:
  static void     loadload();
  static void     storestore();
  static void     loadstore();
  static void     storeload();

  static void     acquire();
  static void     release();
  static void     fence();

  static jbyte    load_acquire(volatile jbyte*   p);
  static jshort   load_acquire(volatile jshort*  p);
  static jint     load_acquire(volatile jint*    p);
  static jlong    load_acquire(volatile jlong*   p);
  static jubyte   load_acquire(volatile jubyte*  p);
  static jushort  load_acquire(volatile jushort* p);
  static juint    load_acquire(volatile juint*   p);
  static julong   load_acquire(volatile julong*  p);
  static jfloat   load_acquire(volatile jfloat*  p);
  static jdouble  load_acquire(volatile jdouble* p);

  static intptr_t load_ptr_acquire(volatile intptr_t*   p);
  static void*    load_ptr_acquire(volatile void*       p);
  static void*    load_ptr_acquire(const volatile void* p);

  static void     release_store(volatile jbyte*   p, jbyte   v);
  static void     release_store(volatile jshort*  p, jshort  v);
  static void     release_store(volatile jint*    p, jint    v);
  static void     release_store(volatile jlong*   p, jlong   v);
  static void     release_store(volatile jubyte*  p, jubyte  v);
  static void     release_store(volatile jushort* p, jushort v);
  static void     release_store(volatile juint*   p, juint   v);
  static void     release_store(volatile julong*  p, julong  v);
  static void     release_store(volatile jfloat*  p, jfloat  v);
  static void     release_store(volatile jdouble* p, jdouble v);

  static void     release_store_ptr(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr(volatile void*     p, void*    v);

  static void     store_fence(jbyte*   p, jbyte   v);
  static void     store_fence(jshort*  p, jshort  v);
  static void     store_fence(jint*    p, jint    v);
  static void     store_fence(jlong*   p, jlong   v);
  static void     store_fence(jubyte*  p, jubyte  v);
  static void     store_fence(jushort* p, jushort v);
  static void     store_fence(juint*   p, juint   v);
  static void     store_fence(julong*  p, julong  v);
  static void     store_fence(jfloat*  p, jfloat  v);
  static void     store_fence(jdouble* p, jdouble v);

  static void     store_ptr_fence(intptr_t* p, intptr_t v);
  static void     store_ptr_fence(void**    p, void*    v);

  static void     release_store_fence(volatile jbyte*   p, jbyte   v);
  static void     release_store_fence(volatile jshort*  p, jshort  v);
  static void     release_store_fence(volatile jint*    p, jint    v);
  static void     release_store_fence(volatile jlong*   p, jlong   v);
  static void     release_store_fence(volatile jubyte*  p, jubyte  v);
  static void     release_store_fence(volatile jushort* p, jushort v);
  static void     release_store_fence(volatile juint*   p, juint   v);
  static void     release_store_fence(volatile julong*  p, julong  v);
  static void     release_store_fence(volatile jfloat*  p, jfloat  v);
  static void     release_store_fence(volatile jdouble* p, jdouble v);

  static void     release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr_fence(volatile void*     p, void*    v);

  // In order to force a memory access, implementations may
  // need a volatile externally visible dummy variable.
  static volatile intptr_t dummy;
};
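
// Typical use of this class (a hypothetical sketch; '_table' and Foo are
// made up for illustration): publish a fully constructed object with
// release_store_ptr and consume it with load_ptr_acquire.
//
//   // writer
//   Foo* f = new Foo();
//   OrderAccess::release_store_ptr(&_table, f);
//
//   // reader
//   Foo* f = (Foo*) OrderAccess::load_ptr_acquire(&_table);
//   if (f != NULL) f->use();   // sees the stores from Foo's constructor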