/*
 * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// Compiler version last used for testing: clang 5.1
// Please update this information when this file changes

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
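// The empty asm statement emits no instructions; the "memory" clobber simply
// tells the compiler that it may not cache memory values in registers across
// this point or reorder memory accesses around it.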
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

// x86 is TSO (total store order) and hence only needs a hardware fence for
// storeload. However, a compiler barrier is still needed to prevent reordering
// between volatile and non-volatile memory accesses.
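// Under TSO, loads are not reordered with other loads, stores are not
// reordered with other stores, and loads are not reordered with earlier
// stores from the same processor; only a store followed by a later load may
// become visible out of order, which is why storeload is the one barrier
// below that requires a real fence instruction.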
// Implementation of class OrderAccess.

inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence();            }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }
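// On x86 every load already has acquire semantics and every store already has
// release semantics, so acquire() and release() only need to restrain the
// compiler. An illustrative pairing (the field and function names here are
// hypothetical, not part of this file): a writer publishes with
//   _payload = v;
//   OrderAccess::release_store(&_flag, 1);
// and a reader consumes with
//   if (OrderAccess::load_acquire(&_flag) != 0) { use(_payload); }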

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
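    // Adding 0 to the word at the stack pointer is a no-op, but the lock
    // prefix turns it into an atomic read-modify-write that acts as a full
    // memory barrier (including StoreLoad), typically cheaper than mfence.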
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
  compiler_barrier();
}

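// On x86, an xchg with a memory operand carries an implicit lock prefix, so
// the specializations below combine the release store and the trailing full
// fence into a single instruction.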
template<>
inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
template<>
inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
template<>
inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

#ifdef AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong*  p, jlong  v) {
  __asm__ volatile (  "xchgq (%2), %0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
#endif // AMD64

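// Float and double delegate to the integer forms above via bit-preserving casts.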
template<>
inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
  release_store_fence((volatile jint*)p, jint_cast(v));
}
template<>
inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
  release_store_fence((volatile jlong*)p, jlong_cast(v));
}

#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
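// Signals to the shared OrderAccess code that this platform uses the
// generalized (template-specialization based) implementation.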

#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP