/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
#define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP

#include "runtime/os.hpp"

// Implementation of class Atomic

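// Zero is the processor-independent HotSpot port.  Most of the
// operations below are built from GCC's __sync atomic builtins, most of
// which act as full memory barriers (see the note in xchg below for the
// exception).  The hand-rolled ARM and M68K variants are kept for
// processors (pre-ARMv6 ARM, classic 68k) on which the builtins could
// not be relied upon when this code was written.
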
#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval.
 * Returns oldval if the exchange happened (the CAS instruction leaves
 * the compare operand unchanged on success) and the current contents
 * of *ptr if it did not.
 * This implementation is processor specific and works on the
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, the 68000 or the 68010, since they
 * lack the CAS instruction.  Using a kernel helper, as the ARM code
 * below does, would allow a complete implementation for the whole
 * architecture family.
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  // The "0" constraint starts ret in the same register as oldval, so
  // CAS compares *ptr with oldval; on failure it loads the value it
  // found into ret, and on success it leaves ret holding oldval.
  __asm __volatile ("cas%.l %0,%2,%1"
                   : "=d" (ret), "+m" (*(ptr))
                   : "d" (newval), "0" (oldval));
  return ret;
}
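
/* A minimal usage sketch (illustrative only; `expected', `desired'
   and `word' are made-up names):

     int seen = __m68k_cmpxchg(expected, desired, &word);
     if (seen == expected) {
       // the exchange happened and word now holds desired
     }

   The retry loops below all follow this pattern.  */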

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation.  */
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        // Success: *ptr now holds newval.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory.  */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until success.

      int prev = *ptr;

      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'.  */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until success.
      int prev = *ptr;

      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        return prev;
    }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Userspace helper that atomically stores newval in *ptr if *ptr is
 * equal to oldval.
 * Returns zero if *ptr was changed, non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed, to allow for assembly
 * optimization in the calling code.
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
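
// 0xffff0fc0 is the fixed address of the cmpxchg entry in the "kuser
// helper" page that the Linux kernel maps near the top of every ARM
// process (see Documentation/arm/kernel_user_helpers.txt).  The kernel
// provides an implementation suited to the CPU it is running on, which
// is why this works even on pre-ARMv6 cores that lack ldrex/strex.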

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation.  */
static inline int arm_compare_and_swap(volatile int *ptr,
                                       int oldval,
                                       int newval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory.  */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.

      int prev = *ptr;

      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'.  */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.
      int prev = *ptr;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        return prev;
    }
}
#endif // ARM

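// Plain 32-bit and pointer-width stores.  A naturally aligned store of
// these sizes is assumed to be atomic in hardware on every platform
// the Zero port targets, so no special sequence is needed.  (jlong
// stores, which may not be atomic on 32-bit machines, are handled at
// the end of this file.)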
inline void Atomic::store(jint store_value, volatile jint* dest) {
  *dest = store_value;
}

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
  *dest = store_value;
}

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

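// The inc/dec family simply delegates to add/add_ptr and therefore
// provides the same ordering guarantees.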
inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  jint result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp).  However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier.  Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}
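
// (With a GCC new enough to provide the __atomic builtins, the
// exchange-plus-barrier sequence above could presumably be written as
// a single __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);
// the __sync form is what this code has historically used.)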

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

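// Note: the cmpxchg_memory_order argument is ignored by all of the
// implementations below; they provide conservatively strong
// (full-barrier) ordering, which satisfies any weaker order a caller
// may request.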
inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value,
                            cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

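// The jlong variant relies on the compiler providing an 8-byte __sync
// compare-and-swap for the target.  On 32-bit machines this needs
// hardware or library support (e.g. cmpxchg8b on x86, or the kernel's
// 64-bit cmpxchg helper on ARM).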
inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value,
                             cmpxchg_memory_order order) {
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value,
                                    cmpxchg_memory_order order) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value,
                                 cmpxchg_memory_order order) {
  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value,
                              order);
}

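// 64-bit loads and stores.  On a 32-bit machine an ordinary jlong
// access may be split into two 32-bit accesses and so is not
// single-copy atomic; os::atomic_copy64() (provided by the Zero OS
// layer) copies the value using whatever primitive the platform needs
// to keep the access atomic.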
inline jlong Atomic::load(volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}

#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP