interfaceSupport.hpp revision 2273:1d1603768966
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP

#include "memory/gcLocker.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
// Wrapper for all entry points to the virtual machine.
// The HandleMarkCleaner is a faster version of HandleMark.
// It relies on the fact that there is a HandleMark further
// down the stack (in JavaCalls::call_helper), and just resets
// to the saved values in that HandleMark.

class HandleMarkCleaner: public StackObj {
 private:
  Thread* _thread;
 public:
  HandleMarkCleaner(Thread* thread) {
    _thread = thread;
    _thread->last_handle_mark()->push();
  }
  ~HandleMarkCleaner() {
    _thread->last_handle_mark()->pop_and_restore();
  }

 private:
  inline void* operator new(size_t size, void* ptr) {
    return ptr;
  }
};
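
// Illustrative sketch (not part of the original file): roughly how HandleMarkCleaner
// cooperates with the HandleMark set up in JavaCalls::call_helper. The routine name
// and 'some_oop' below are hypothetical; only the push()/pop_and_restore() pairing
// is the point.
//
//   void example_entry(JavaThread* thread) {   // hypothetical VM entry point
//     HandleMarkCleaner __hm(thread);          // push() onto the last HandleMark
//     Handle h(thread, some_oop);              // handles allocated in the body...
//   }                                          // ...are released by pop_and_restore(),
//                                              // which resets to the saved state of the
//                                              // HandleMark further down the stack.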

// InterfaceSupport provides functionality used by the __LEAF and __ENTRY
// macros. These macros are used to guard entry points into the VM and
// to perform checks when leaving the VM.


class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;
  static long _fullgc_alot_invocation;

  // tracing
  static void trace(const char* result_type, const char* header);

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

# ifdef ENABLE_ZAP_DEAD_LOCALS
  static void zap_dead_locals_old();
# endif

  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  // OS dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "interfaceSupport_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "interfaceSupport_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "interfaceSupport_windows.hpp"
#endif

};


// Basic class for all thread transition classes.

class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change the thread state in a manner that lets the safepoint code detect the change.
  // Time-critical: called on exit from every runtime routine.
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // store to serialize page so VM thread can do pseudo remote membar
        os::write_memory_serialize_page(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transition states");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
  // never block on entry to the VM (blocking here would break the code, since e.g.
  // preserve arguments have not been set up yet).
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transition states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state(_thread_in_native_trans);

    // Make sure new state is seen by GC thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    // We never install asynchronous exceptions when coming (back) in
    // to the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere where we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};
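
// Illustrative note (not part of the original file): JavaThreadState is laid out so
// that every stable state has an even value and its in-transition neighbour is the
// odd value immediately above it (hence the "(from & 1) == 0" asserts and the
// "from + 1" step above). A transition therefore looks roughly like:
//
//   // stable 'from' state (even), e.g. _thread_in_vm
//   thread->set_thread_state((JavaThreadState)(from + 1));   // odd transition state,
//                                                            // treated as unsafe by the
//                                                            // safepoint machinery
//   // ... possibly block at a safepoint ...
//   thread->set_thread_state(to);                            // stable 'to' state (even)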


class ThreadInVMfromJava : public ThreadStateTransition {
 public:
  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJava()  {
    trans(_thread_in_vm, _thread_in_Java);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
  }
};


class ThreadInVMfromUnknown {
 private:
  JavaThread* _thread;
 public:
  ThreadInVMfromUnknown() : _thread(NULL) {
    Thread* t = Thread::current();
    if (t->is_Java_thread()) {
      JavaThread* t2 = (JavaThread*) t;
      if (t2->thread_state() == _thread_in_native) {
        _thread = t2;
        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
        // Used to have a HandleMarkCleaner but that is dangerous as
        // it could free a handle in our (indirect, nested) caller.
        // We expect any handles will be short lived and figure we
        // don't need an actual HandleMark.
      }
    }
  }
  ~ThreadInVMfromUnknown()  {
    if (_thread) {
      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
    }
  }
};


class ThreadInVMfromNative : public ThreadStateTransition {
 public:
  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_native(_thread_in_vm);
  }
  ~ThreadInVMfromNative() {
    trans_and_fence(_thread_in_vm, _thread_in_native);
  }
};


class ThreadToNativeFromVM : public ThreadStateTransition {
 public:
  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
    // We are leaving the VM at this point and going directly to native code.
    // Block, if we are in the middle of a safepoint synchronization.
    assert(!thread->owns_locks(), "must release all locks when leaving VM");
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_in_native);
    // Check for pending async exceptions or suspends.
    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
  }

  ~ThreadToNativeFromVM() {
    trans_from_native(_thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java.
  }
};


class ThreadBlockInVM : public ThreadStateTransition {
 public:
  ThreadBlockInVM(JavaThread *thread)
  : ThreadStateTransition(thread) {
    // Once we are blocked, the VM expects the stack to be walkable.
    thread->frame_anchor()->make_walkable(thread);
    trans_and_fence(_thread_in_vm, _thread_blocked);
  }
  ~ThreadBlockInVM() {
    trans_and_fence(_thread_blocked, _thread_in_vm);
    // We don't need to clear_walkable because it will happen automagically when we return to Java.
  }
};
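
// Illustrative usage (not part of the original file): a hypothetical VM-internal
// routine that is about to block on an OS primitive and therefore must not hold up
// safepoints while it waits. The event object and its wait() call are made up for
// the example; only the ThreadBlockInVM scoping is the point.
//
//   void example_wait(JavaThread* thread) {
//     ThreadBlockInVM tbivm(thread);   // _thread_in_vm -> _thread_blocked (stack made walkable)
//     _some_event->wait();             // hypothetical blocking call; safepoints may proceed
//   }                                  // destructor: _thread_blocked -> _thread_in_vm,
//                                      // blocking here if a safepoint is in progress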


// This special transition class is only used to prevent asynchronous exceptions
// from being installed on vm exit in situations where we can't tolerate them.
// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
 public:
  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
    trans_from_java(_thread_in_vm);
  }
  ~ThreadInVMfromJavaNoAsyncException()  {
    trans(_thread_in_vm, _thread_in_Java);
    // NOTE: We do not check for pending async exceptions.
    // If we did and moved the pending async exception over into the
    // pending exception field, we would need to deopt (currently C2
    // only). However, to do so would require that we transition back
    // to the _thread_in_vm state. Instead we postpone the handling of
    // the async exception.

    // Check for pending suspends only.
    if (_thread->has_special_runtime_exit_condition())
      _thread->handle_special_runtime_exit_condition(false);
  }
};

// Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
// Can be used to verify properties on enter/exit of the VM.

#ifdef ASSERT
class VMEntryWrapper {
 public:
  VMEntryWrapper() {
    if (VerifyLastFrame) {
      InterfaceSupport::verify_last_frame();
    }
  }

  ~VMEntryWrapper() {
    InterfaceSupport::check_gc_alot();
    if (WalkStackALot) {
      InterfaceSupport::walk_stack();
    }
#ifdef ENABLE_ZAP_DEAD_LOCALS
    if (ZapDeadLocalsOld) {
      InterfaceSupport::zap_dead_locals_old();
    }
#endif
#ifdef COMPILER2
    // This option is not used by Compiler 1
    if (StressDerivedPointers) {
      InterfaceSupport::stress_derived_pointers();
    }
#endif
    if (DeoptimizeALot || DeoptimizeRandom) {
      InterfaceSupport::deoptimizeAll();
    }
    if (ZombieALot) {
      InterfaceSupport::zombieAll();
    }
    if (UnlinkSymbolsALot) {
      InterfaceSupport::unlinkSymbols();
    }
    // do verification AFTER potential deoptimization
    if (VerifyStack) {
      InterfaceSupport::verify_stack();
    }

  }
};


class VMNativeEntryWrapper {
 public:
  VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }

  ~VMNativeEntryWrapper() {
    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
  }
};

#endif


// VM-internal runtime interface support

#ifdef ASSERT

class RuntimeHistogramElement : public HistogramElement {
  public:
   RuntimeHistogramElement(const char* name);
};

#define TRACE_CALL(result_type, header)                            \
  InterfaceSupport::_number_of_calls++;                            \
  if (TraceRuntimeCalls)                                           \
    InterfaceSupport::trace(#result_type, #header);                \
  if (CountRuntimeCalls) {                                         \
    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
    if (e != NULL) e->increment_count();                           \
  }
#else
#define TRACE_CALL(result_type, header)                            \
  /* do nothing */
#endif


// LEAF routines do not lock, GC or throw exceptions

#define __LEAF(result_type, header)                                  \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  /* begin of body */


// ENTRY routines may lock, GC and throw exceptions

#define __ENTRY(result_type, header, thread)                         \
  TRACE_CALL(result_type, header)                                    \
  HandleMarkCleaner __hm(thread);                                    \
  Thread* THREAD = thread;                                           \
  /* begin of body */

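// Illustrative expansion (not part of the original file): applied to a hypothetical
// routine, __ENTRY(void, Foo::bar(JavaThread* thread), thread) simply plants the
// guards below at the top of the body; Foo::bar is a made-up name.
//
//   TRACE_CALL(void, Foo::bar(JavaThread* thread))
//   HandleMarkCleaner __hm(thread);   // releases handles created in the body on exit
//   Thread* THREAD = thread;          // THREAD is what the CHECK_/THROW exception macros expect
//   /* body follows */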

// QUICK_ENTRY routines behave like ENTRY but without a handle mark

#define __QUICK_ENTRY(result_type, header, thread)                   \
  TRACE_CALL(result_type, header)                                    \
  debug_only(NoHandleMark __hm;)                                     \
  Thread* THREAD = thread;                                           \
  /* begin of body */


// Definitions for IRT (Interpreter Runtime)
// (thread is an argument passed in to all these routines)

#define IRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    __ENTRY(result_type, header, thread)                             \
    debug_only(VMEntryWrapper __vew;)


#define IRT_LEAF(result_type, header)                                \
  result_type header {                                               \
    __LEAF(result_type, header)                                      \
    debug_only(No_Safepoint_Verifier __nspv(true);)


#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    __ENTRY(result_type, header, thread)                             \
    debug_only(VMEntryWrapper __vew;)

// Another special case for nmethod_entry_point so the nmethod that the
// interpreter is about to branch to doesn't get flushed before we branch
// to its interpreter_entry_point.  Skip stress testing here too.
// Also we don't allow async exceptions because it is just too painful.
#define IRT_ENTRY_FOR_NMETHOD(result_type, header)                   \
  result_type header {                                               \
    nmethodLocker _nmlock(nm);                                       \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    __ENTRY(result_type, header, thread)

#define IRT_END }

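// Illustrative usage (not part of the original file): a hypothetical interpreter
// runtime entry point bracketed by IRT_ENTRY/IRT_END. The method and argument names
// are made up; note that the 'thread' parameter name is assumed by the macro.
//
//   IRT_ENTRY(void, InterpreterRuntime::example_entry(JavaThread* thread, oopDesc* obj))
//     // Runs in _thread_in_vm with THREAD bound and a HandleMarkCleaner active,
//     // so handles, GC and exceptions are all allowed here.
//     Handle h(THREAD, obj);
//   IRT_END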

// Definitions for JRT (Java (Compiler/Shared) Runtime)

#define JRT_ENTRY(result_type, header)                               \
  result_type header {                                               \
    ThreadInVMfromJava __tiv(thread);                                \
    __ENTRY(result_type, header, thread)                             \
    debug_only(VMEntryWrapper __vew;)


#define JRT_LEAF(result_type, header)                                \
  result_type header {                                               \
  __LEAF(result_type, header)                                        \
  debug_only(JRT_Leaf_Verifier __jlv;)


#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
  result_type header {                                               \
    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
    __ENTRY(result_type, header, thread)                             \
    debug_only(VMEntryWrapper __vew;)

// Same as JRT_ENTRY, but lets the routine compute its return value after the
// JRT_BLOCK containing the safepoint transition, on the way back into Java from the VM.
#define JRT_BLOCK_ENTRY(result_type, header)                         \
  result_type header {                                               \
    TRACE_CALL(result_type, header)                                  \
    HandleMarkCleaner __hm(thread);

#define JRT_BLOCK                                                    \
    {                                                                \
    ThreadInVMfromJava __tiv(thread);                                \
    Thread* THREAD = thread;                                         \
    debug_only(VMEntryWrapper __vew;)

#define JRT_BLOCK_END }

#define JRT_END }
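
// Illustrative usage (not part of the original file): a hypothetical compiler runtime
// routine that performs the VM transition inside a JRT_BLOCK and then produces its
// result outside of it. The class, method and helper names are made up; 'thread' is
// assumed by the macros.
//
//   JRT_BLOCK_ENTRY(address, SomeRuntime::example_handler(JavaThread* thread))
//     address result = NULL;
//     JRT_BLOCK
//       // _thread_in_vm here; may lock, GC, throw, and reach safepoints
//       result = compute_result(THREAD);    // hypothetical helper
//     JRT_BLOCK_END
//     return result;                        // computed after the transition block
//   JRT_END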

// Definitions for JNI

#define JNI_ENTRY(result_type, header)                               \
    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
    WeakPreserveExceptionMark __wem(thread);

#define JNI_ENTRY_NO_PRESERVE(result_type, header)                   \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    __ENTRY(result_type, header, thread)


// Ensure that the VMNativeEntryWrapper constructor, which can cause
// a GC, is called outside the NoHandleMark (set via __QUICK_ENTRY).
#define JNI_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    __QUICK_ENTRY(result_type, header, thread)


#define JNI_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
    __LEAF(result_type, header)


// Close the routine and the extern "C"
#define JNI_END } }
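
// Illustrative usage (not part of the original file): a hypothetical JNI implementation
// function wrapped by JNI_ENTRY/JNI_END. The function name is made up; the macro assumes
// a JNIEnv* parameter named 'env' is in scope so it can recover the JavaThread.
//
//   JNI_ENTRY(jobject, jni_ExampleGetSelf(JNIEnv* env, jobject obj))
//     // _thread_in_vm here, with THREAD bound and handles/GC/exceptions allowed
//     Handle h(THREAD, JNIHandles::resolve(obj));
//     return JNIHandles::make_local(env, h());
//   JNI_END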



// Definitions for JVM

#define JVM_ENTRY(result_type, header)                               \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    __ENTRY(result_type, header, thread)


#define JVM_ENTRY_NO_ENV(result_type, header)                        \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    __ENTRY(result_type, header, thread)


#define JVM_QUICK_ENTRY(result_type, header)                         \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
    ThreadInVMfromNative __tiv(thread);                              \
    debug_only(VMNativeEntryWrapper __vew;)                          \
    __QUICK_ENTRY(result_type, header, thread)


#define JVM_LEAF(result_type, header)                                \
extern "C" {                                                         \
  result_type JNICALL header {                                       \
    VM_Exit::block_if_vm_exited();                                   \
    __LEAF(result_type, header)


#define JVM_END } }
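
// Illustrative usage (not part of the original file): a hypothetical JVM_* export
// wrapped by JVM_ENTRY/JVM_END. The function name is made up; as with JNI_ENTRY,
// the macro assumes a JNIEnv* parameter named 'env'.
//
//   JVM_ENTRY(jlong, JVM_ExampleElapsedCounter(JNIEnv* env, jclass ignored))
//     // _thread_in_vm here; locking, GC and exceptions are permitted
//     return os::elapsed_counter();
//   JVM_END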

#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP