/*
 * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value that does not interfere with the C++11 memory_order values.
  // We need to be more conservative.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.
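  //
  // For example (an illustrative sketch only; some_jlong_field, new_value and
  // the fallback are placeholders, not part of this interface), callers on
  // 32-bit platforms typically guard 64-bit updates like this:
  //
  //   if (VM_Version::supports_cx8()) {
  //     (void)Atomic::cmpxchg(new_value, &some_jlong_field, old_value);
  //   } else {
  //     // take a lock and update the field non-atomically
  //   }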

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  inline static void store    (jbyte    store_value, jbyte*    dest);
  inline static void store    (jshort   store_value, jshort*   dest);
  inline static void store    (jint     store_value, jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, jlong*    dest);
  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
  inline static void store_ptr(void*    store_value, void*     dest);

  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  inline static void store    (jshort   store_value, volatile jshort*   dest);
  inline static void store    (jint     store_value, volatile jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, volatile jlong*    dest);
  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  inline static void store_ptr(void*    store_value, volatile void*     dest);

  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong load(volatile jlong* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
  inline static jint     add    (jint     add_value, volatile jint*     dest);
  inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
  inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong    add    (jlong    add_value, volatile jlong*    dest);

  // Atomically increment a location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  inline static void inc    (volatile jint*     dest);
  inline static void inc    (volatile jshort*   dest);
  inline static void inc    (volatile size_t*   dest);
  inline static void inc_ptr(volatile intptr_t* dest);
  inline static void inc_ptr(volatile void*     dest);

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  inline static void dec    (volatile jint*     dest);
  inline static void dec    (volatile jshort*   dest);
  inline static void dec    (volatile size_t*   dest);
  inline static void dec_ptr(volatile intptr_t* dest);
  inline static void dec_ptr(volatile void*     dest);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeds. Returns the
  // prior value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  inline static jbyte        cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static jint         cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value, cmpxchg_memory_order order = memory_order_conservative);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong        cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static unsigned int cmpxchg    (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static intptr_t     cmpxchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest, intptr_t     compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static void*        cmpxchg_ptr(void*        exchange_value, volatile void*         dest, void*        compare_value, cmpxchg_memory_order order = memory_order_conservative);
};
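
// Example (an illustrative sketch only; _counter and compute_update() are
// placeholder names, not part of this header): the cmpxchg() family is
// typically used in a retry loop that recomputes the update whenever another
// thread wins the race. The shared Atomic::add(jlong, ...) definition below
// has exactly this shape.
//
//   jint old_value = _counter;
//   jint new_value = compute_update(old_value);
//   while (old_value != Atomic::cmpxchg(new_value, &_counter, old_value)) {
//     old_value = _counter;
//     new_value = compute_update(old_value);
//   }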

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not the same size as intptr_t; this platform needs its own implementations of the size_t operations below
#endif

inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
}

inline void Atomic::inc(volatile size_t* dest) {
  inc_ptr((volatile intptr_t*) dest);
}

inline void Atomic::dec(volatile size_t* dest) {
  dec_ptr((volatile intptr_t*) dest);
}


#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
/*
 * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized cmpxchg
 * in terms of jint-sized cmpxchg. Platforms may override this by defining their own inline definition
 * as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform specific
 * implementation to be used instead.
 */
inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
                             jbyte compare_value, cmpxchg_memory_order order) {
  STATIC_ASSERT(sizeof(jbyte) == 1);
  volatile jint* dest_int =
      static_cast<volatile jint*>(align_ptr_down(dest, sizeof(jint)));
  size_t offset = pointer_delta(dest, dest_int, 1);
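  // offset is the byte index of *dest within its aligned 4-byte word; the
  // same index addresses that byte in the local copy "cur" below, regardless
  // of endianness, because the copy preserves the in-memory byte order.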
  jint cur = *dest_int;
  jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);

  // The byte in our copy may not equal compare_value, so force it to
  // compare_value; that way the first cmpxchg below fails if the byte in
  // memory differs from compare_value.
  cur_as_bytes[offset] = compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    jint new_value = cur;
    // ... except for the one jbyte we want to update
    reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;

    jint res = cmpxchg(new_value, dest_int, cur, order);
    if (res == cur) break; // success

    // at least one jbyte in the jint changed value, so update
    // our view of the current jint
    cur = res;
    // if our jbyte still holds compare_value we loop and try again
  } while (cur_as_bytes[offset] == compare_value);

  return cur_as_bytes[offset];
}

#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE

inline unsigned int Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline unsigned int Atomic::cmpxchg(unsigned int exchange_value,
                                    volatile unsigned int* dest,
                                    unsigned int compare_value,
                                    cmpxchg_memory_order order) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
                                       (jint)compare_value, order);
}

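// The shared jlong add is built from load() and a cmpxchg() retry loop, so it
// is only as available as jlong cmpxchg itself; see the comment at the top of
// this class about using jlong atomics on 32-bit platforms.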
inline jlong Atomic::add(jlong    add_value, volatile jlong*    dest) {
  jlong old = load(dest);
  jlong new_value = old + add_value;
  while (old != cmpxchg(new_value, dest, old)) {
    old = load(dest);
    new_value = old + add_value;
  }
  // Return the updated value, as documented for the add() family above.
  return new_value;
}

inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
  // Most platforms do not support atomic add on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of (add_value << 16)
  // to the 32-bit word.
  //
  // The least significant 16 bits of this 32-bit word are never affected,
  // even in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
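  //
  // For example (illustrative arithmetic only), adding 1 to a jshort that
  // currently holds 0xFFFF while the neighboring low half of the word is zero:
  //   0xFFFF0000 + 0x00010000 == 0x00000000 (mod 2^32);
  // the carry out of bit 31 is simply discarded, and bits 0..15 (the other
  // jshort of the pair) are untouched.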
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
#endif
  return (jshort)(new_value >> 16); // preserves sign
}

inline void Atomic::inc(volatile jshort* dest) {
  (void)add(1, dest);
}

inline void Atomic::dec(volatile jshort* dest) {
  (void)add(-1, dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP