atomic_bsd_zero.hpp revision 13477:4d61110c6046
/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP

#include "runtime/os.hpp"

// Implementation of class Atomic

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user space.
 * Returns oldval on success; if no exchange happened, returns the current
 * contents of *ptr (which differ from oldval).
 * This implementation is processor specific and works on the
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, the 68000 or the 68010, since they lack
 * the CAS instruction.
 * Using a kernel helper would be better for a complete implementation
 * across the architecture.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                   : "=d" (ret), "+m" (*(ptr))
                   : "d" (newval), "0" (oldval));
  return ret;
}
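
/* Return-value convention, illustrated (a sketch, assuming the CAS
   instruction semantics described above) with *ptr initially 5:

     __m68k_cmpxchg(5, 7, ptr);  // success: *ptr becomes 7, returns 5
     __m68k_cmpxchg(4, 7, ptr);  // failure: *ptr is unchanged, returns
                                 // its current contents (here 7)

   Callers therefore detect success by comparing the result against the
   oldval they passed in, as the helpers below do.  */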

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation.  */
static inline int m68k_compare_and_swap(int newval,
                                        volatile int *ptr,
                                        int oldval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        // Success: the helper returned the oldval we passed in.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory.  */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until success.

      int prev = *ptr;

      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'.  */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until success.
      int prev = *ptr;

      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        return prev;
    }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
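
// On ARM Linux-style kernels, 0xffff0fc0 is the fixed address at which the
// kernel's "kuser helper" page exposes a cmpxchg routine appropriate for
// the actual CPU; the cast above simply lets C code call through that
// address.  A usage sketch (hypothetical lock word, not code from this
// file):
//
//   volatile int lock_word = 0;
//   if (__kernel_cmpxchg(0, 1, &lock_word) == 0) {
//     // lock_word was 0 and has atomically become 1.
//   }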
/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation.  */
static inline int arm_compare_and_swap(int newval,
                                       volatile int *ptr,
                                       int oldval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory.  */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.

      int prev = *ptr;

      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'.  */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.
      int prev = *ptr;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        return prev;
    }
}
#endif // ARM

inline void Atomic::store(jint store_value, volatile jint* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}

inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}
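
// The __sync_synchronize() ahead of the plain store keeps earlier memory
// accesses from being reordered past the store, giving it release
// semantics (and then some: the fence is a full barrier).  A hedged
// modern-GCC near-equivalent, not what this file uses:
//
//   __atomic_store_n(dest, store_value, __ATOMIC_RELEASE);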

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  jint result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp).  However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier.  Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}
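
// Taken together, the exchange plus the trailing fence behave like a
// sequentially consistent swap.  A hedged modern-GCC near-equivalent,
// not what this file uses:
//
//   jint result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_SEQ_CST);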

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
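
// CmpxchgByteUsingInt builds a 1-byte CAS on top of a 4-byte one: it
// CASes the aligned int that contains the byte, replacing only that
// byte's bits.  A minimal sketch of the idea (hypothetical helper, not
// HotSpot's actual implementation; assumes a little-endian target and
// <stdint.h> types):
//
//   inline bool cas_byte_using_int(volatile uint8_t* p, uint8_t cmp, uint8_t xchg) {
//     volatile uint32_t* ip = (volatile uint32_t*)((uintptr_t)p & ~(uintptr_t)3);
//     int shift = ((uintptr_t)p & 3) * 8;
//     uint32_t mask = (uint32_t)0xff << shift;
//     for (;;) {
//       uint32_t old_word = *ip;
//       if ((uint8_t)((old_word & mask) >> shift) != cmp)
//         return false;                      // byte no longer matches
//       uint32_t new_word = (old_word & ~mask) | ((uint32_t)xchg << shift);
//       if (__sync_val_compare_and_swap(ip, old_word, new_word) == old_word)
//         return true;                       // swapped atomically
//       // An adjacent byte changed under us; reload and retry.
//     }
//   }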

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
#else
#ifdef M68K
  return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}
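
// The order argument can be ignored here: __sync_val_compare_and_swap is
// documented by GCC as a full barrier and returns the contents of *dest
// before the operation, which already satisfies the strongest ordering
// atomic.hpp can ask for.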

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
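
// On 32-bit targets a 64-bit load or store may compile to two 32-bit
// accesses, so a racing writer could leave a reader seeing half-old,
// half-new bits ("tearing").  For example, a plain
//
//   jlong v = *src;   // potentially two loads on a 32-bit CPU
//
// is not atomic there; os::atomic_copy64() is the per-platform primitive
// that performs the copy as a single indivisible 64-bit access.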

inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}

#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP