interfaceSupport.hpp revision 5976:2b8e28fdf503
1282785Sgjb/*
2282785Sgjb * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3282785Sgjb * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4282785Sgjb *
5282785Sgjb * This code is free software; you can redistribute it and/or modify it
6282785Sgjb * under the terms of the GNU General Public License version 2 only, as
7282785Sgjb * published by the Free Software Foundation.
8282785Sgjb *
9282785Sgjb * This code is distributed in the hope that it will be useful, but WITHOUT
10282785Sgjb * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11282785Sgjb * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12282785Sgjb * version 2 for more details (a copy is included in the LICENSE file that
13282785Sgjb * accompanied this code).
14282787Sgjb *
15282785Sgjb * You should have received a copy of the GNU General Public License version
16282785Sgjb * 2 along with this work; if not, write to the Free Software Foundation,
17282785Sgjb * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18282785Sgjb *
19282785Sgjb * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20282785Sgjb * or visit www.oracle.com if you need additional information or have any
21282785Sgjb * questions.
22282785Sgjb *
23282785Sgjb */
24282785Sgjb
25282785Sgjb#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
26282785Sgjb#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
27282785Sgjb
28282785Sgjb#include "memory/gcLocker.hpp"
29282785Sgjb#include "runtime/handles.inline.hpp"
30282785Sgjb#include "runtime/mutexLocker.hpp"
31282785Sgjb#include "runtime/orderAccess.hpp"
32282785Sgjb#include "runtime/os.hpp"
33282785Sgjb#include "runtime/safepoint.hpp"
34282785Sgjb#include "runtime/thread.inline.hpp"
35282785Sgjb#include "runtime/vmThread.hpp"
36282785Sgjb#include "utilities/globalDefinitions.hpp"
37282785Sgjb#include "utilities/preserveException.hpp"
38282785Sgjb#include "utilities/top.hpp"
39282785Sgjb
40282785Sgjb// Wrapper for all entry points to the virtual machine.
41282785Sgjb// The HandleMarkCleaner is a faster version of HandleMark.
42282785Sgjb// It relies on the fact that there is a HandleMark further
43282785Sgjb// down the stack (in JavaCalls::call_helper), and just resets
44282785Sgjb// to the saved values in that HandleMark.
45282785Sgjb
46282785Sgjbclass HandleMarkCleaner: public StackObj {
47282785Sgjb private:
48282785Sgjb  Thread* _thread;
49282785Sgjb public:
50282785Sgjb  HandleMarkCleaner(Thread* thread) {
51282785Sgjb    _thread = thread;
52282785Sgjb    _thread->last_handle_mark()->push();
53282785Sgjb  }
54282785Sgjb  ~HandleMarkCleaner() {
55282785Sgjb    _thread->last_handle_mark()->pop_and_restore();
56282785Sgjb  }
57282785Sgjb
58282789Sgjb private:
59282789Sgjb  inline void* operator new(size_t size, void* ptr) throw() {
60282789Sgjb    return ptr;
61282785Sgjb  }
62282785Sgjb};
63282785Sgjb
64282787Sgjb// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
65282787Sgjb// VM_ENTRY_BASE macros. These macros are used to guard entry points into
66282787Sgjb// the VM and perform checks upon leave of the VM.
67282787Sgjb
68282787Sgjb
class InterfaceSupport: AllStatic {
# ifdef ASSERT
 public:
  // Counters driving the debug-only stress options (+ScavengeALot,
  // +FullGCALot) and the TRACE_CALL bookkeeping below.
  static long _scavenge_alot_counter;
  static long _fullgc_alot_counter;
  static long _number_of_calls;          // incremented by TRACE_CALL on every traced VM entry
  static long _fullgc_alot_invocation;

  // tracing
  static void trace(const char* result_type, const char* header);

  // Helper methods used to implement +ScavengeALot and +FullGCALot
  static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
  static void gc_alot();

  // Stack walking used by the +WalkStackALot stress option (see VMEntryWrapper).
  static void walk_stack_from(vframe* start_vf);
  static void walk_stack();

# ifdef ENABLE_ZAP_DEAD_LOCALS
  static void zap_dead_locals_old();
# endif

  // More debug-only stress/verification hooks invoked from VMEntryWrapper.
  static void zombieAll();
  static void unlinkSymbols();
  static void deoptimizeAll();
  static void stress_derived_pointers();
  static void verify_stack();
  static void verify_last_frame();
# endif

 public:
  // OS dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "interfaceSupport_linux.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "interfaceSupport_solaris.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "interfaceSupport_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "interfaceSupport_aix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "interfaceSupport_bsd.hpp"
#endif

};
118282785Sgjb
119282785Sgjb
120282785Sgjb// Basic class for all thread transition classes.
121282785Sgjb
class ThreadStateTransition : public StackObj {
 protected:
  JavaThread* _thread;   // the thread whose state is being transitioned
 public:
  ThreadStateTransition(JavaThread *thread) {
    _thread = thread;
    assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
  }

  // Change threadstate in a manner, so safepoint can detect changes.
  // Time-critical: called on exit from every runtime routine
  static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(from != _thread_in_Java, "use transition_from_java");
    assert(from != _thread_in_native, "use transition_from_native");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
    assert(thread->thread_state() == from, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // store to serialize page so VM thread can do pseudo remote membar
        os::write_memory_serialize_page(thread);
      }
    }

    // Honor an in-progress safepoint before publishing the final state.
    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // transition_and_fence must be used on any thread state transition
  // where there might not be a Java call stub on the stack, in
  // particular on Windows where the Structured Exception Handler is
  // set up in the call stub. os::write_memory_serialize_page() can
  // fault and we can't recover from it on Windows without a SEH in
  // place.
  static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
    assert(thread->thread_state() == from, "coming from wrong thread state");
    assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state((JavaThreadState)(from + 1));

    // Make sure new state is seen by VM thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    // Honor an in-progress safepoint before publishing the final state.
    if (SafepointSynchronize::do_call_back()) {
      SafepointSynchronize::block(thread);
    }
    thread->set_thread_state(to);

    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
  }

  // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
  // never block on entry to the VM. This will break the code, since e.g. preserve arguments
  // have not been setup.
  static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
    assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
    thread->set_thread_state(to);
  }

  // Transition out of _thread_in_native; goes through the intermediate
  // _thread_in_native_trans state so a safepoint/suspend can be honored.
  static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
    assert((to & 1) == 0, "odd numbers are transitions states");
    assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
    // Change to transition state (assumes total store ordering!  -Urs)
    thread->set_thread_state(_thread_in_native_trans);

    // Make sure new state is seen by GC thread
    if (os::is_MP()) {
      if (UseMembar) {
        // Force a fence between the write above and read below
        OrderAccess::fence();
      } else {
        // Must use this rather than serialization page in particular on Windows
        InterfaceSupport::serialize_memory(thread);
      }
    }

    // We never install asynchronous exceptions when coming (back) in
    // to the runtime from native code because the runtime is not set
    // up to handle exceptions floating around at arbitrary points.
    if (SafepointSynchronize::do_call_back() || thread->is_suspend_after_native()) {
      JavaThread::check_safepoint_and_suspend_for_native_trans(thread);

      // Clear unhandled oops anywhere where we could block, even if we don't.
      CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
    }

    thread->set_thread_state(to);
  }
 protected:
   // Convenience wrappers over the static transition routines, bound to _thread.
   void trans(JavaThreadState from, JavaThreadState to)  { transition(_thread, from, to); }
   void trans_from_java(JavaThreadState to)              { transition_from_java(_thread, to); }
   void trans_from_native(JavaThreadState to)            { transition_from_native(_thread, to); }
   void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
};
234
235
236class ThreadInVMfromJava : public ThreadStateTransition {
237 public:
238  ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
239    trans_from_java(_thread_in_vm);
240  }
241  ~ThreadInVMfromJava()  {
242    trans(_thread_in_vm, _thread_in_Java);
243    // Check for pending. async. exceptions or suspends.
244    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
245  }
246};
247
248
249class ThreadInVMfromUnknown {
250 private:
251  JavaThread* _thread;
252 public:
253  ThreadInVMfromUnknown() : _thread(NULL) {
254    Thread* t = Thread::current();
255    if (t->is_Java_thread()) {
256      JavaThread* t2 = (JavaThread*) t;
257      if (t2->thread_state() == _thread_in_native) {
258        _thread = t2;
259        ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
260        // Used to have a HandleMarkCleaner but that is dangerous as
261        // it could free a handle in our (indirect, nested) caller.
262        // We expect any handles will be short lived and figure we
263        // don't need an actual HandleMark.
264      }
265    }
266  }
267  ~ThreadInVMfromUnknown()  {
268    if (_thread) {
269      ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
270    }
271  }
272};
273
274
275class ThreadInVMfromNative : public ThreadStateTransition {
276 public:
277  ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
278    trans_from_native(_thread_in_vm);
279  }
280  ~ThreadInVMfromNative() {
281    trans_and_fence(_thread_in_vm, _thread_in_native);
282  }
283};
284
285
286class ThreadToNativeFromVM : public ThreadStateTransition {
287 public:
288  ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
289    // We are leaving the VM at this point and going directly to native code.
290    // Block, if we are in the middle of a safepoint synchronization.
291    assert(!thread->owns_locks(), "must release all locks when leaving VM");
292    thread->frame_anchor()->make_walkable(thread);
293    trans_and_fence(_thread_in_vm, _thread_in_native);
294    // Check for pending. async. exceptions or suspends.
295    if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
296  }
297
298  ~ThreadToNativeFromVM() {
299    trans_from_native(_thread_in_vm);
300    // We don't need to clear_walkable because it will happen automagically when we return to java
301  }
302};
303
304
305class ThreadBlockInVM : public ThreadStateTransition {
306 public:
307  ThreadBlockInVM(JavaThread *thread)
308  : ThreadStateTransition(thread) {
309    // Once we are blocked vm expects stack to be walkable
310    thread->frame_anchor()->make_walkable(thread);
311    trans_and_fence(_thread_in_vm, _thread_blocked);
312  }
313  ~ThreadBlockInVM() {
314    trans_and_fence(_thread_blocked, _thread_in_vm);
315    // We don't need to clear_walkable because it will happen automagically when we return to java
316  }
317};
318
319
320// This special transition class is only used to prevent asynchronous exceptions
321// from being installed on vm exit in situations where we can't tolerate them.
322// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
323class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
324 public:
325  ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
326    trans_from_java(_thread_in_vm);
327  }
328  ~ThreadInVMfromJavaNoAsyncException()  {
329    trans(_thread_in_vm, _thread_in_Java);
330    // NOTE: We do not check for pending. async. exceptions.
331    // If we did and moved the pending async exception over into the
332    // pending exception field, we would need to deopt (currently C2
333    // only). However, to do so would require that we transition back
334    // to the _thread_in_vm state. Instead we postpone the handling of
335    // the async exception.
336
337    // Check for pending. suspends only.
338    if (_thread->has_special_runtime_exit_condition())
339      _thread->handle_special_runtime_exit_condition(false);
340  }
341};
342
343// Debug class instantiated in JRT_ENTRY and ITR_ENTRY macro.
344// Can be used to verify properties on enter/exit of the VM.
345
346#ifdef ASSERT
347class VMEntryWrapper {
348 public:
349  VMEntryWrapper() {
350    if (VerifyLastFrame) {
351      InterfaceSupport::verify_last_frame();
352    }
353  }
354
355  ~VMEntryWrapper() {
356    InterfaceSupport::check_gc_alot();
357    if (WalkStackALot) {
358      InterfaceSupport::walk_stack();
359    }
360#ifdef ENABLE_ZAP_DEAD_LOCALS
361    if (ZapDeadLocalsOld) {
362      InterfaceSupport::zap_dead_locals_old();
363    }
364#endif
365#ifdef COMPILER2
366    // This option is not used by Compiler 1
367    if (StressDerivedPointers) {
368      InterfaceSupport::stress_derived_pointers();
369    }
370#endif
371    if (DeoptimizeALot || DeoptimizeRandom) {
372      InterfaceSupport::deoptimizeAll();
373    }
374    if (ZombieALot) {
375      InterfaceSupport::zombieAll();
376    }
377    if (UnlinkSymbolsALot) {
378      InterfaceSupport::unlinkSymbols();
379    }
380    // do verification AFTER potential deoptimization
381    if (VerifyStack) {
382      InterfaceSupport::verify_stack();
383    }
384
385  }
386};
387
388
389class VMNativeEntryWrapper {
390 public:
391  VMNativeEntryWrapper() {
392    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
393  }
394
395  ~VMNativeEntryWrapper() {
396    if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
397  }
398};
399
400#endif
401
402
403// VM-internal runtime interface support
404
405#ifdef ASSERT
406
407class RuntimeHistogramElement : public HistogramElement {
408  public:
409   RuntimeHistogramElement(const char* name);
410};
411
412#define TRACE_CALL(result_type, header)                            \
413  InterfaceSupport::_number_of_calls++;                            \
414  if (TraceRuntimeCalls)                                           \
415    InterfaceSupport::trace(#result_type, #header);                \
416  if (CountRuntimeCalls) {                                         \
417    static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
418    if (e != NULL) e->increment_count();                           \
419  }
420#else
421#define TRACE_CALL(result_type, header)                            \
422  /* do nothing */
423#endif
424
425
426// LEAF routines do not lock, GC or throw exceptions
427
428#define VM_LEAF_BASE(result_type, header)                            \
429  TRACE_CALL(result_type, header)                                    \
430  debug_only(NoHandleMark __hm;)                                     \
431  os::verify_stack_alignment();                                      \
432  /* begin of body */
433
434
435// ENTRY routines may lock, GC and throw exceptions
436
437#define VM_ENTRY_BASE(result_type, header, thread)                   \
438  TRACE_CALL(result_type, header)                                    \
439  HandleMarkCleaner __hm(thread);                                    \
440  Thread* THREAD = thread;                                           \
441  os::verify_stack_alignment();                                      \
442  /* begin of body */
443
444
445// QUICK_ENTRY routines behave like ENTRY but without a handle mark
446
447#define VM_QUICK_ENTRY_BASE(result_type, header, thread)             \
448  TRACE_CALL(result_type, header)                                    \
449  debug_only(NoHandleMark __hm;)                                     \
450  Thread* THREAD = thread;                                           \
451  os::verify_stack_alignment();                                      \
452  /* begin of body */
453
454
455// Definitions for IRT (Interpreter Runtime)
456// (thread is an argument passed in to all these routines)
457
458#define IRT_ENTRY(result_type, header)                               \
459  result_type header {                                               \
460    ThreadInVMfromJava __tiv(thread);                                \
461    VM_ENTRY_BASE(result_type, header, thread)                       \
462    debug_only(VMEntryWrapper __vew;)
463
464
465#define IRT_LEAF(result_type, header)                                \
466  result_type header {                                               \
467    VM_LEAF_BASE(result_type, header)                                \
468    debug_only(No_Safepoint_Verifier __nspv(true);)
469
470
471#define IRT_ENTRY_NO_ASYNC(result_type, header)                      \
472  result_type header {                                               \
473    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
474    VM_ENTRY_BASE(result_type, header, thread)                       \
475    debug_only(VMEntryWrapper __vew;)
476
477#define IRT_END }
478
479
480// Definitions for JRT (Java (Compiler/Shared) Runtime)
481
482#define JRT_ENTRY(result_type, header)                               \
483  result_type header {                                               \
484    ThreadInVMfromJava __tiv(thread);                                \
485    VM_ENTRY_BASE(result_type, header, thread)                       \
486    debug_only(VMEntryWrapper __vew;)
487
488
489#define JRT_LEAF(result_type, header)                                \
490  result_type header {                                               \
491  VM_LEAF_BASE(result_type, header)                                  \
492  debug_only(JRT_Leaf_Verifier __jlv;)
493
494
495#define JRT_ENTRY_NO_ASYNC(result_type, header)                      \
496  result_type header {                                               \
497    ThreadInVMfromJavaNoAsyncException __tiv(thread);                \
498    VM_ENTRY_BASE(result_type, header, thread)                       \
499    debug_only(VMEntryWrapper __vew;)
500
501// Same as JRT Entry but allows for return value after the safepoint
502// to get back into Java from the VM
503#define JRT_BLOCK_ENTRY(result_type, header)                         \
504  result_type header {                                               \
505    TRACE_CALL(result_type, header)                                  \
506    HandleMarkCleaner __hm(thread);
507
508#define JRT_BLOCK                                                    \
509    {                                                                \
510    ThreadInVMfromJava __tiv(thread);                                \
511    Thread* THREAD = thread;                                         \
512    debug_only(VMEntryWrapper __vew;)
513
514#define JRT_BLOCK_END }
515
516#define JRT_END }
517
518// Definitions for JNI
519
520#define JNI_ENTRY(result_type, header)                               \
521    JNI_ENTRY_NO_PRESERVE(result_type, header)                       \
522    WeakPreserveExceptionMark __wem(thread);
523
524#define JNI_ENTRY_NO_PRESERVE(result_type, header)             \
525extern "C" {                                                         \
526  result_type JNICALL header {                                \
527    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
528    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
529    ThreadInVMfromNative __tiv(thread);                              \
530    debug_only(VMNativeEntryWrapper __vew;)                          \
531    VM_ENTRY_BASE(result_type, header, thread)
532
533
534// Ensure that the VMNativeEntryWrapper constructor, which can cause
535// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
536#define JNI_QUICK_ENTRY(result_type, header)                         \
537extern "C" {                                                         \
538  result_type JNICALL header {                                \
539    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
540    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
541    ThreadInVMfromNative __tiv(thread);                              \
542    debug_only(VMNativeEntryWrapper __vew;)                          \
543    VM_QUICK_ENTRY_BASE(result_type, header, thread)
544
545
546#define JNI_LEAF(result_type, header)                                \
547extern "C" {                                                         \
548  result_type JNICALL header {                                \
549    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
550    assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
551    VM_LEAF_BASE(result_type, header)
552
553
554// Close the routine and the extern "C"
555#define JNI_END } }
556
557
558
559// Definitions for JVM
560
561#define JVM_ENTRY(result_type, header)                               \
562extern "C" {                                                         \
563  result_type JNICALL header {                                       \
564    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
565    ThreadInVMfromNative __tiv(thread);                              \
566    debug_only(VMNativeEntryWrapper __vew;)                          \
567    VM_ENTRY_BASE(result_type, header, thread)
568
569
570#define JVM_ENTRY_NO_ENV(result_type, header)                        \
571extern "C" {                                                         \
572  result_type JNICALL header {                                       \
573    JavaThread* thread = (JavaThread*)ThreadLocalStorage::thread();  \
574    ThreadInVMfromNative __tiv(thread);                              \
575    debug_only(VMNativeEntryWrapper __vew;)                          \
576    VM_ENTRY_BASE(result_type, header, thread)
577
578
579#define JVM_QUICK_ENTRY(result_type, header)                         \
580extern "C" {                                                         \
581  result_type JNICALL header {                                       \
582    JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
583    ThreadInVMfromNative __tiv(thread);                              \
584    debug_only(VMNativeEntryWrapper __vew;)                          \
585    VM_QUICK_ENTRY_BASE(result_type, header, thread)
586
587
588#define JVM_LEAF(result_type, header)                                \
589extern "C" {                                                         \
590  result_type JNICALL header {                                       \
591    VM_Exit::block_if_vm_exited();                                   \
592    VM_LEAF_BASE(result_type, header)
593
594
595#define JVM_END } }
596
597#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
598