/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
#define OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP

#include "runtime/os.hpp"
#include "vm_version_arm.hpp"

// Implementation of class Atomic

/*
 * Atomic long operations on 32-bit ARM
 * ARMv7 provides the LDREXD/STREXD exclusive-access instructions, so 64-bit
 * atomics are not a problem there.
 * ARM < v7 does not have explicit 64-bit atomic load/store capability.
 * However, gcc emits LDRD/STRD instructions on v5te and LDM/STM on v5t
 * when loading/storing 64 bits.
 * For non-MP machines (which is all we support for ARM < v7)
 * under current Linux distros these instructions appear to be atomic.
 * See section A3.5.3 of the ARM Architecture Reference Manual for ARMv7.
 * Also, for cmpxchg64 on ARM < v7, we check for cmpxchg64 support in the
 * Linux kernel using __kuser_helper_version. See entry-armv.S in the Linux
 * kernel source or kernel_user_helpers.txt in the kernel Documentation.
 */
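
// Illustrative sketch only (nothing in this file calls it): the kernel user
// helpers mentioned above are published at fixed addresses near the top of
// the user address space, and the 64-bit cmpxchg helper is only present when
// the advertised helper version is new enough. A hypothetical probe, with the
// addresses and version threshold taken from kernel_user_helpers.txt and used
// here purely for illustration, would look roughly like this:
//
//   // __kuser_helper_version is published at 0xffff0ffc;
//   // __kuser_cmpxchg64 (at 0xffff0f60) requires helper version >= 5.
//   inline bool kuser_cmpxchg64_available() {
//     const int helper_version = *(volatile int*)0xffff0ffc;
//     return helper_version >= 5;
//   }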

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }

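// On 32-bit ARM, 64-bit loads and stores are routed through the os:: function
// pointers below, which supply the atomicity discussed in the comment at the
// top of this file; on AARCH64 a plain aligned 64-bit access suffices.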
inline jlong Atomic::load (volatile jlong* src) {
  assert(((intx)src & (sizeof(jlong)-1)) == 0, "Atomic load jlong mis-aligned");
#ifdef AARCH64
  return *src;
#else
  return (*os::atomic_load_long_func)(src);
#endif
}

inline void Atomic::store (jlong value, volatile jlong* dest) {
  assert(((intx)dest & (sizeof(jlong)-1)) == 0, "Atomic store jlong mis-aligned");
#ifdef AARCH64
  *dest = value;
#else
  (*os::atomic_store_long_func)(value, dest);
#endif
}

inline void Atomic::store (jlong value, jlong* dest) {
  store(value, (volatile jlong*)dest);
}

// As per atomic.hpp, all read-modify-write operations have to provide two-way
// barrier semantics. For AARCH64 we use load-acquire-with-reservation and
// store-release-with-reservation. While load-acquire combined with store-release
// does not generally form a two-way barrier, their use with reservations does:
// the ARMv8 architecture manual, Section F "Barrier Litmus Tests", indicates
// they provide sequentially consistent semantics. All we need to add is an
// explicit barrier in the failure path of the cmpxchg operations (as these
// don't execute the store); arguably this may be overly cautious, as there is
// a very low likelihood that the hardware would pull loads/stores into the
// region guarded by the reservation.
//
// For ARMv7 we add explicit barriers in the stubs.
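//
// For reference, every AARCH64 read-modify-write operation below follows the
// same load-linked/store-conditional retry loop, sketched here in
// pseudo-assembly:
//
//   1: ldaxr  old, [dest]            // load-acquire exclusive
//      <compute new value from old>
//      stlxr  w_status, new, [dest]  // store-release exclusive
//      cbnz   w_status, 1b           // reservation lost, retry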

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef AARCH64
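  // ldaxr/stlxr retry loop: load-acquire the current value exclusively, add,
  // then store-release exclusively; tmp is set non-zero if the exclusive
  // reservation was lost, in which case we retry.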
  jint val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[val], [%[dest]]\n\t"
    " add %w[val], %w[val], %w[add_val]\n\t"
    " stlxr %w[tmp], %w[val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
#else
  return (*os::atomic_add_func)(add_value, dest);
#endif
}

inline void Atomic::inc(volatile jint* dest) {
  Atomic::add(1, (volatile jint *)dest);
}

inline void Atomic::dec(volatile jint* dest) {
  Atomic::add(-1, (volatile jint *)dest);
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef AARCH64
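  // Same ldaxr/stlxr retry loop as Atomic::add, but operating on the full
  // 64-bit (X) registers for the pointer-sized value.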
  intptr_t val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[val], [%[dest]]\n\t"
    " add %[val], %[val], %[add_val]\n\t"
    " stlxr %w[tmp], %[val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
#else
  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
#endif
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  Atomic::add_ptr(1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  Atomic::add_ptr(-1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  inc_ptr((volatile intptr_t*)dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  dec_ptr((volatile intptr_t*)dest);
}


inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef AARCH64
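  // Unconditional exchange: load-acquire the old value exclusively and
  // store-release the new one, retrying until the store succeeds.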
  jint old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[old_val], [%[dest]]\n\t"
    " stlxr %w[tmp], %w[new_val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
#else
  return (*os::atomic_xchg_func)(exchange_value, dest);
#endif
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
#ifdef AARCH64
  intptr_t old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[old_val], [%[dest]]\n\t"
    " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
#else
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
#endif
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

// The memory_order parameter is ignored; we always provide the strongest,
// most conservative ordering.

inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
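  // Compare-and-exchange loop: if the loaded value does not match
  // compare_value we branch to 2:, where the dmb supplies the full barrier
  // that the skipped store-release would otherwise have provided (see the
  // comment above Atomic::add).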
  jint rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[rv], [%[dest]]\n\t"
    " cmp %w[rv], %w[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  // Warning:  Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
#endif
}

inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
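  // Same compare-and-exchange loop as the jint variant, on 64-bit (X)
  // registers; the 32-bit ARM path below asserts supports_cx8() and relies on
  // the cmpxchg64 support described in the comment at the top of this file.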
  jlong rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[rv], [%[dest]]\n\t"
    " cmp %[rv], %[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
#endif
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, order);
#else
  return (intptr_t)cmpxchg((jint)exchange_value, (volatile jint*)dest, (jint)compare_value, order);
#endif
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order) {
  return (void*)cmpxchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
}

#endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP