1/*
2 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26#ifndef OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
27#define OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
28
29#include "runtime/os.hpp"
30
31// Implementation of class atomic
32
33#ifdef M68K
34
/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 *
 * Implemented with the m68k CAS instruction: the compare operand (%0,
 * seeded with oldval) is compared against *ptr; if they match, newval is
 * stored and the compare operand is left unchanged, otherwise the current
 * contents of *ptr are copied into it.  The function therefore returns
 * oldval when the exchange happened, and the (different) current value of
 * *ptr when it did not.
 *
 * NOTE(review): this block used to be documented as "returns newval on
 * success", which does not match the CAS semantics described in the M68000
 * family programmer's reference — confirm against hardware before relying
 * on either reading.
 *
 * This implementation is processor specific and works on
 * 68020 68030 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
 * instruction.
 * Using a kernelhelper would be better for arch complete implementation.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                   : "=d" (ret), "+m" (*(ptr))
                   : "d" (newval), "0" (oldval));
  return ret;
}
56
57/* Perform an atomic compare and swap: if the current value of `*PTR'
58   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
59   `*PTR' before the operation.*/
60static inline int m68k_compare_and_swap(int newval,
61                                        volatile int *ptr,
62                                        int oldval) {
63  for (;;) {
64      int prev = *ptr;
65      if (prev != oldval)
66        return prev;
67
68      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
69        // Success.
70        return prev;
71
72      // We failed even though prev == oldval.  Try again.
73    }
74}
75
76/* Atomically add an int to memory.  */
77static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
78  for (;;) {
79      // Loop until success.
80
81      int prev = *ptr;
82
83      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
84        return prev + add_value;
85    }
86}
87
/* Atomically write VALUE into `*PTR' and returns the previous
   contents of `*PTR'.  */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until success.  The CAS instruction leaves the compare
      // operand (prev) unchanged when the store happens, so a return
      // value equal to prev means newval is now in *ptr.
      int prev = *ptr;

      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        return prev;
    }
}
99#endif // M68K
100
101#ifdef ARM
102
/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * NOTE(review): 0xffff0fc0 is the entry of the Linux ARM kernel user-helper
 * page; confirm that the BSD kernel targeted by this bsd_zero port maps a
 * compatible helper at the same address.
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
115
116
117
118/* Perform an atomic compare and swap: if the current value of `*PTR'
119   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
120   `*PTR' before the operation.*/
121static inline int arm_compare_and_swap(int newval,
122                                       volatile int *ptr,
123                                       int oldval) {
124  for (;;) {
125      int prev = *ptr;
126      if (prev != oldval)
127        return prev;
128
129      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
130        // Success.
131        return prev;
132
133      // We failed even though prev == oldval.  Try again.
134    }
135}
136
137/* Atomically add an int to memory.  */
138static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
139  for (;;) {
140      // Loop until a __kernel_cmpxchg succeeds.
141
142      int prev = *ptr;
143
144      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
145        return prev + add_value;
146    }
147}
148
149/* Atomically write VALUE into `*PTR' and returns the previous
150   contents of `*PTR'.  */
151static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
152  for (;;) {
153      // Loop until a __kernel_cmpxchg succeeds.
154      int prev = *ptr;
155
156      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
157        return prev;
158    }
159}
160#endif // ARM
161
// 32-bit atomic store.  On platforms other than ARM and M68K a full
// memory barrier is issued before the store so it cannot be reordered
// ahead of preceding accesses; on ARM/M68K the plain volatile store is
// relied upon.
inline void Atomic::store(jint store_value, volatile jint* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}
168
// Pointer-sized atomic store; same barrier policy as the jint overload:
// a full fence precedes the store except on ARM and M68K.
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
#if !defined(ARM) && !defined(M68K)
  __sync_synchronize();
#endif
  *dest = store_value;
}
175
// Platform hook for Atomic::add.  The AddAndFetch base (CRTP over this
// specialization) adapts the generic Atomic::add front end to the
// size-specialized add_and_fetch() primitives defined below.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  // Atomically add add_value to *dest and return the new value.
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};
183
184template<>
185template<typename I, typename D>
186inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
187  STATIC_ASSERT(4 == sizeof(I));
188  STATIC_ASSERT(4 == sizeof(D));
189
190#ifdef ARM
191  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
192#else
193#ifdef M68K
194  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
195#else
196  return __sync_add_and_fetch(dest, add_value);
197#endif // M68K
198#endif // ARM
199}
200
// 8-byte atomic add-and-fetch: both addend and destination must be
// 64 bits wide.  Delegates directly to the GCC intrinsic, which returns
// the updated value.
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  return __sync_add_and_fetch(dest, add_value);
}
209
// Atomically increment a jint.
inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

// Atomically increment a pointer-sized integer.
inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

// Atomically increment a pointer-sized location given as void*.
inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

// Atomically decrement a jint.
inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

// Atomically decrement a pointer-sized integer.
inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

// Atomically decrement a pointer-sized location given as void*.
inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}
233
234inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
235#ifdef ARM
236  return arm_lock_test_and_set(dest, exchange_value);
237#else
238#ifdef M68K
239  return m68k_lock_test_and_set(dest, exchange_value);
240#else
241  // __sync_lock_test_and_set is a bizarrely named atomic exchange
242  // operation.  Note that some platforms only support this with the
243  // limitation that the only valid value to store is the immediate
244  // constant 1.  There is a test for this in JNI_CreateJavaVM().
245  jint result = __sync_lock_test_and_set (dest, exchange_value);
246  // All atomic operations are expected to be full memory barriers
247  // (see atomic.hpp). However, __sync_lock_test_and_set is not
248  // a full memory barrier, but an acquire barrier. Hence, this added
249  // barrier.
250  __sync_synchronize();
251  return result;
252#endif // M68K
253#endif // ARM
254}
255
256inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
257                                 volatile intptr_t* dest) {
258#ifdef ARM
259  return arm_lock_test_and_set(dest, exchange_value);
260#else
261#ifdef M68K
262  return m68k_lock_test_and_set(dest, exchange_value);
263#else
264  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
265  __sync_synchronize();
266  return result;
267#endif // M68K
268#endif // ARM
269}
270
271inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
272  return (void *) xchg_ptr((intptr_t) exchange_value,
273                           (volatile intptr_t*) dest);
274}
275
// No direct support for cmpxchg of bytes; emulate using int via the
// shared CmpxchgByteUsingInt helper (declared in atomic.hpp).
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
279
280template<>
281template<typename T>
282inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
283                                                T volatile* dest,
284                                                T compare_value,
285                                                cmpxchg_memory_order order) const {
286  STATIC_ASSERT(4 == sizeof(T));
287#ifdef ARM
288  return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
289#else
290#ifdef M68K
291  return cmpxchg_using_helper<int>(m68k_compare_and_swap, exchange_value, dest, compare_value);
292#else
293  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
294#endif // M68K
295#endif // ARM
296}
297
// 8-byte compare-and-swap via the GCC full-barrier intrinsic; returns the
// value observed in *dest before the operation.  The order argument is
// ignored.
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
307
// 64-bit atomic load.  Zero may run on 32-bit hardware where a plain
// jlong read is not indivisible, so delegate to os::atomic_copy64 for an
// atomic copy into a local before returning it.
inline jlong Atomic::load(const volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}
313
// 64-bit atomic store (non-volatile destination); delegates to
// os::atomic_copy64 so the write is indivisible on 32-bit hardware.
inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

// 64-bit atomic store (volatile destination); same delegation.
inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}
321
322#endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
323