/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// Compiler version last used for testing: gcc 4.8.2
// Please update this information when this file changes

// Implementation of class OrderAccess.

// A compiler barrier: forces the C++ compiler to discard any memory values
// cached in registers and forbids compile-time reordering of memory accesses
// across it. It emits no machine instructions.
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

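// x86 is TSO (total store order): loads are not reordered with other loads,
// stores are not reordered with other stores, and loads are not reordered
// with older stores. loadload, storestore and loadstore therefore only need
// to constrain the compiler, as do acquire and release, whose semantics TSO
// already provides in hardware. The one reordering TSO does permit is a
// store followed by a load from a different location, so storeload alone
// requires a real fence.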
inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence();            }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }

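// A full two-way fence. Any lock-prefixed read-modify-write instruction is
// a full barrier on x86. Adding zero to the word at the top of the stack
// leaves memory unchanged, targets a line that is almost certainly already
// in cache, and, per the comment below, is often cheaper than mfence.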
inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
  compiler_barrier();
}

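// Specialized release_store_fence implementations. An xchg with a memory
// operand implicitly asserts the processor's lock signal, so a single xchg
// both performs the store and acts as a full fence, avoiding a separate
// barrier instruction after the store. The old value swapped back into v
// is simply discarded.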
template<>
inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
template<>
inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
template<>
inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

#ifdef AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong*  p, jlong  v) {
  __asm__ volatile (  "xchgq (%2), %0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
#endif // AMD64
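
// No jlong specialization on 32-bit x86: there is no plain 64-bit exchange
// to memory there, so on 32-bit builds jlong stores fall back to the
// generalized implementation (see VM_HAS_GENERALIZED_ORDER_ACCESS below).
// The jfloat/jdouble specializations below reuse the same-width integer
// forms via bit-preserving casts.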

template<>
inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
  release_store_fence((volatile jint*)p, jint_cast(v));
}
template<>
inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
  release_store_fence((volatile jlong*)p, jlong_cast(v));
}

#define VM_HAS_GENERALIZED_ORDER_ACCESS 1

#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP