// thread.hpp revision 470:ad8c8ca4ab0f
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
22169689Skan * 23169689Skan */ 24169689Skan 25169689Skanclass ThreadSafepointState; 26169689Skanclass ThreadProfiler; 27169689Skan 28169689Skanclass JvmtiThreadState; 29169689Skanclass JvmtiGetLoadedClassesClosure; 30169689Skanclass ThreadStatistics; 31169689Skanclass ConcurrentLocksDump; 32169689Skanclass ParkEvent ; 33169689Skan 34169689Skanclass ciEnv; 35169689Skanclass CompileThread; 36169689Skanclass CompileLog; 37169689Skanclass CompileTask; 38169689Skanclass CompileQueue; 39169689Skanclass CompilerCounters; 40169689Skanclass vframeArray; 41169689Skan 42169689Skanclass DeoptResourceMark; 43169689Skanclass jvmtiDeferredLocalVariableSet; 44169689Skan 45169689Skanclass GCTaskQueue; 46169689Skanclass ThreadClosure; 47169689Skanclass IdealGraphPrinter; 48169689Skan 49169689Skan// Class hierarchy 50169689Skan// - Thread 51169689Skan// - VMThread 52169689Skan// - JavaThread 53169689Skan// - WatcherThread 54169689Skan 55169689Skanclass Thread: public ThreadShadow { 56169689Skan friend class VMStructs; 57169689Skan private: 58169689Skan // Exception handling 59169689Skan // (Note: _pending_exception and friends are in ThreadShadow) 60169689Skan //oop _pending_exception; // pending exception for current thread 61169689Skan // const char* _exception_file; // file information for exception (debugging only) 62169689Skan // int _exception_line; // line information for exception (debugging only) 63169689Skan 64169689Skan // Support for forcing alignment of thread objects for biased locking 65169689Skan void* _real_malloc_address; 66169689Skan public: 67169689Skan void* operator new(size_t size); 68169689Skan void operator delete(void* p); 69169689Skan private: 70169689Skan 71169689Skan // *************************************************************** 72169689Skan // Suspend and resume support 73169689Skan // *************************************************************** 74169689Skan // 75169689Skan // VM suspend/resume no longer exists - it was once used for various 
76169689Skan // things including safepoints but was deprecated and finally removed 77169689Skan // in Java 7. Because VM suspension was considered "internal" Java-level 78169689Skan // suspension was considered "external", and this legacy naming scheme 79169689Skan // remains. 80169689Skan // 81169689Skan // External suspend/resume requests come from JVM_SuspendThread, 82169689Skan // JVM_ResumeThread, JVMTI SuspendThread, and finally JVMTI 83169689Skan // ResumeThread. External 84169689Skan // suspend requests cause _external_suspend to be set and external 85169689Skan // resume requests cause _external_suspend to be cleared. 86169689Skan // External suspend requests do not nest on top of other external 87169689Skan // suspend requests. The higher level APIs reject suspend requests 88169689Skan // for already suspended threads. 89169689Skan // 90169689Skan // The external_suspend 91169689Skan // flag is checked by has_special_runtime_exit_condition() and java thread 92169689Skan // will self-suspend when handle_special_runtime_exit_condition() is 93169689Skan // called. Most uses of the _thread_blocked state in JavaThreads are 94169689Skan // considered the same as being externally suspended; if the blocking 95169689Skan // condition lifts, the JavaThread will self-suspend. Other places 96169689Skan // where VM checks for external_suspend include: 97169689Skan // + mutex granting (do not enter monitors when thread is suspended) 98169689Skan // + state transitions from _thread_in_native 99169689Skan // 100169689Skan // In general, java_suspend() does not wait for an external suspend 101169689Skan // request to complete. When it returns, the only guarantee is that 102169689Skan // the _external_suspend field is true. 103169689Skan // 104169689Skan // wait_for_ext_suspend_completion() is used to wait for an external 105169689Skan // suspend request to complete. 
External suspend requests are usually 106169689Skan // followed by some other interface call that requires the thread to 107169689Skan // be quiescent, e.g., GetCallTrace(). By moving the "wait time" into 108169689Skan // the interface that requires quiescence, we give the JavaThread a 109169689Skan // chance to self-suspend before we need it to be quiescent. This 110169689Skan // improves overall suspend/query performance. 111169689Skan // 112169689Skan // _suspend_flags controls the behavior of java_ suspend/resume. 113169689Skan // It must be set under the protection of SR_lock. Read from the flag is 114169689Skan // OK without SR_lock as long as the value is only used as a hint. 115169689Skan // (e.g., check _external_suspend first without lock and then recheck 116169689Skan // inside SR_lock and finish the suspension) 117169689Skan // 118169689Skan // _suspend_flags is also overloaded for other "special conditions" so 119169689Skan // that a single check indicates whether any special action is needed 120169689Skan // eg. for async exceptions. 121169689Skan // ------------------------------------------------------------------- 122169689Skan // Notes: 123169689Skan // 1. The suspend/resume logic no longer uses ThreadState in OSThread 124169689Skan // but we still update its value to keep other part of the system (mainly 125169689Skan // JVMTI) happy. ThreadState is legacy code (see notes in 126169689Skan // osThread.hpp). 127169689Skan // 128169689Skan // 2. It would be more natural if set_external_suspend() is private and 129169689Skan // part of java_suspend(), but that probably would affect the suspend/query 130169689Skan // performance. Need more investigation on this. 
131169689Skan // 132169689Skan 133169689Skan // suspend/resume lock: used for self-suspend 134169689Skan Monitor* _SR_lock; 135169689Skan 136169689Skan protected: 137169689Skan enum SuspendFlags { 138169689Skan // NOTE: avoid using the sign-bit as cc generates different test code 139169689Skan // when the sign-bit is used, and sometimes incorrectly - see CR 6398077 140169689Skan 141169689Skan _external_suspend = 0x20000000U, // thread is asked to self suspend 142169689Skan _ext_suspended = 0x40000000U, // thread has self-suspended 143169689Skan _deopt_suspend = 0x10000000U, // thread needs to self suspend for deopt 144169689Skan 145169689Skan _has_async_exception = 0x00000001U // there is a pending async exception 146169689Skan }; 147169689Skan 148169689Skan // various suspension related flags - atomically updated 149169689Skan // overloaded for async exception checking in check_special_condition_for_native_trans. 150169689Skan volatile uint32_t _suspend_flags; 151169689Skan 152169689Skan private: 153169689Skan int _num_nested_signal; 154169689Skan 155169689Skan public: 156169689Skan void enter_signal_handler() { _num_nested_signal++; } 157169689Skan void leave_signal_handler() { _num_nested_signal--; } 158169689Skan bool is_inside_signal_handler() const { return _num_nested_signal > 0; } 159169689Skan 160169689Skan private: 161169689Skan // Debug tracing 162169689Skan static void trace(const char* msg, const Thread* const thread) PRODUCT_RETURN; 163169689Skan 164169689Skan // Active_handles points to a block of handles 165169689Skan JNIHandleBlock* _active_handles; 166169689Skan 167169689Skan // One-element thread local free list 168169689Skan JNIHandleBlock* _free_handle_block; 169169689Skan 170169689Skan // Point to the last handle mark 171169689Skan HandleMark* _last_handle_mark; 172169689Skan 173169689Skan // The parity of the last strong_roots iteration in which this thread was 174169689Skan // claimed as a task. 
175169689Skan jint _oops_do_parity; 176169689Skan 177169689Skan public: 178169689Skan void set_last_handle_mark(HandleMark* mark) { _last_handle_mark = mark; } 179169689Skan HandleMark* last_handle_mark() const { return _last_handle_mark; } 180169689Skan private: 181169689Skan 182169689Skan // debug support for checking if code does allow safepoints or not 183169689Skan // GC points in the VM can happen because of allocation, invoking a VM operation, or blocking on 184169689Skan // mutex, or blocking on an object synchronizer (Java locking). 185169689Skan // If !allow_safepoint(), then an assertion failure will happen in any of the above cases 186169689Skan // If !allow_allocation(), then an assertion failure will happen during allocation 187169689Skan // (Hence, !allow_safepoint() => !allow_allocation()). 188169689Skan // 189169689Skan // The two classes No_Safepoint_Verifier and No_Allocation_Verifier are used to set these counters. 190169689Skan // 191169689Skan NOT_PRODUCT(int _allow_safepoint_count;) // If 0, thread allow a safepoint to happen 192169689Skan debug_only (int _allow_allocation_count;) // If 0, the thread is allowed to allocate oops. 193169689Skan 194169689Skan // Record when GC is locked out via the GC_locker mechanism 195169689Skan CHECK_UNHANDLED_OOPS_ONLY(int _gc_locked_out_count;) 196169689Skan 197169689Skan friend class No_Alloc_Verifier; 198169689Skan friend class No_Safepoint_Verifier; 199169689Skan friend class Pause_No_Safepoint_Verifier; 200169689Skan friend class ThreadLocalStorage; 201169689Skan friend class GC_locker; 202169689Skan 203169689Skan // In order for all threads to be able to use fast locking, we need to know the highest stack 204169689Skan // address of where a lock is on the stack (stacks normally grow towards lower addresses). This 205169689Skan // variable is initially set to NULL, indicating no locks are used by the thread. 
During the thread's 206169689Skan // execution, it will be set whenever locking can happen, i.e., when we call out to Java code or use 207169689Skan // an ObjectLocker. The value is never decreased, hence, it will over the lifetime of a thread 208169689Skan // approximate the real stackbase. 209169689Skan address _highest_lock; // Highest stack address where a JavaLock exist 210169689Skan 211169689Skan ThreadLocalAllocBuffer _tlab; // Thread-local eden 212169689Skan 213169689Skan int _vm_operation_started_count; // VM_Operation support 214169689Skan int _vm_operation_completed_count; // VM_Operation support 215169689Skan 216169689Skan ObjectMonitor* _current_pending_monitor; // ObjectMonitor this thread 217169689Skan // is waiting to lock 218169689Skan bool _current_pending_monitor_is_from_java; // locking is from Java code 219169689Skan 220169689Skan // ObjectMonitor on which this thread called Object.wait() 221169689Skan ObjectMonitor* _current_waiting_monitor; 222169689Skan 223169689Skan // Private thread-local objectmonitor list - a simple cache organized as a SLL. 224169689Skan public: 225169689Skan ObjectMonitor * omFreeList ; 226169689Skan int omFreeCount ; // length of omFreeList 227169689Skan int omFreeProvision ; // reload chunk size 228169689Skan 229169689Skan public: 230169689Skan enum { 231169689Skan is_definitely_current_thread = true 232169689Skan }; 233169689Skan 234169689Skan // Constructor 235169689Skan Thread(); 236169689Skan virtual ~Thread(); 237169689Skan 238169689Skan // initializtion 239169689Skan void initialize_thread_local_storage(); 240169689Skan 241169689Skan // thread entry point 242169689Skan virtual void run(); 243169689Skan 244169689Skan // Testers 245169689Skan virtual bool is_VM_thread() const { return false; } 246169689Skan virtual bool is_Java_thread() const { return false; } 247169689Skan // Remove this ifdef when C1 is ported to the compiler interface. 
248169689Skan virtual bool is_Compiler_thread() const { return false; } 249169689Skan virtual bool is_hidden_from_external_view() const { return false; } 250169689Skan virtual bool is_jvmti_agent_thread() const { return false; } 251169689Skan // True iff the thread can perform GC operations at a safepoint. 252169689Skan // Generally will be true only of VM thread and parallel GC WorkGang 253169689Skan // threads. 254169689Skan virtual bool is_GC_task_thread() const { return false; } 255169689Skan virtual bool is_Watcher_thread() const { return false; } 256169689Skan virtual bool is_ConcurrentGC_thread() const { return false; } 257169689Skan 258169689Skan virtual char* name() const { return (char*)"Unknown thread"; } 259169689Skan 260169689Skan // Returns the current thread 261169689Skan static inline Thread* current(); 262169689Skan 263169689Skan // Common thread operations 264169689Skan static void set_priority(Thread* thread, ThreadPriority priority); 265169689Skan static ThreadPriority get_priority(const Thread* const thread); 266169689Skan static void start(Thread* thread); 267169689Skan static void interrupt(Thread* thr); 268169689Skan static bool is_interrupted(Thread* thr, bool clear_interrupted); 269169689Skan 270169689Skan Monitor* SR_lock() const { return _SR_lock; } 271169689Skan 272169689Skan bool has_async_exception() const { return (_suspend_flags & _has_async_exception) != 0; } 273169689Skan 274169689Skan void set_suspend_flag(SuspendFlags f) { 275169689Skan assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); 276169689Skan uint32_t flags; 277169689Skan do { 278169689Skan flags = _suspend_flags; 279169689Skan } 280169689Skan while (Atomic::cmpxchg((jint)(flags | f), 281169689Skan (volatile jint*)&_suspend_flags, 282169689Skan (jint)flags) != (jint)flags); 283169689Skan } 284169689Skan void clear_suspend_flag(SuspendFlags f) { 285169689Skan assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); 286169689Skan uint32_t flags; 
287169689Skan do { 288169689Skan flags = _suspend_flags; 289169689Skan } 290169689Skan while (Atomic::cmpxchg((jint)(flags & ~f), 291169689Skan (volatile jint*)&_suspend_flags, 292169689Skan (jint)flags) != (jint)flags); 293169689Skan } 294169689Skan 295169689Skan void set_has_async_exception() { 296169689Skan set_suspend_flag(_has_async_exception); 297169689Skan } 298169689Skan void clear_has_async_exception() { 299169689Skan clear_suspend_flag(_has_async_exception); 300169689Skan } 301169689Skan 302169689Skan // Support for Unhandled Oop detection 303169689Skan#ifdef CHECK_UNHANDLED_OOPS 304169689Skan private: 305169689Skan UnhandledOops *_unhandled_oops; 306169689Skan public: 307169689Skan UnhandledOops* unhandled_oops() { return _unhandled_oops; } 308169689Skan // Mark oop safe for gc. It may be stack allocated but won't move. 309169689Skan void allow_unhandled_oop(oop *op) { 310169689Skan if (CheckUnhandledOops) unhandled_oops()->allow_unhandled_oop(op); 311169689Skan } 312169689Skan // Clear oops at safepoint so crashes point to unhandled oop violator 313169689Skan void clear_unhandled_oops() { 314169689Skan if (CheckUnhandledOops) unhandled_oops()->clear_unhandled_oops(); 315169689Skan } 316169689Skan bool is_gc_locked_out() { return _gc_locked_out_count > 0; } 317169689Skan#endif // CHECK_UNHANDLED_OOPS 318169689Skan 319169689Skan public: 320169689Skan // Installs a pending exception to be inserted later 321169689Skan static void send_async_exception(oop thread_oop, oop java_throwable); 322169689Skan 323169689Skan // Resource area 324169689Skan ResourceArea* resource_area() const { return _resource_area; } 325169689Skan void set_resource_area(ResourceArea* area) { _resource_area = area; } 326169689Skan 327169689Skan OSThread* osthread() const { return _osthread; } 328169689Skan void set_osthread(OSThread* thread) { _osthread = thread; } 329169689Skan 330169689Skan // JNI handle support 331169689Skan JNIHandleBlock* active_handles() const { return 
_active_handles; } 332169689Skan void set_active_handles(JNIHandleBlock* block) { _active_handles = block; } 333169689Skan JNIHandleBlock* free_handle_block() const { return _free_handle_block; } 334169689Skan void set_free_handle_block(JNIHandleBlock* block) { _free_handle_block = block; } 335169689Skan 336169689Skan // Internal handle support 337169689Skan HandleArea* handle_area() const { return _handle_area; } 338169689Skan void set_handle_area(HandleArea* area) { _handle_area = area; } 339169689Skan 340169689Skan // Thread-Local Allocation Buffer (TLAB) support 341169689Skan ThreadLocalAllocBuffer& tlab() { return _tlab; } 342169689Skan void initialize_tlab() { 343169689Skan if (UseTLAB) { 344169689Skan tlab().initialize(); 345169689Skan } 346169689Skan } 347169689Skan 348169689Skan // VM operation support 349169689Skan int vm_operation_ticket() { return ++_vm_operation_started_count; } 350169689Skan int vm_operation_completed_count() { return _vm_operation_completed_count; } 351169689Skan void increment_vm_operation_completed_count() { _vm_operation_completed_count++; } 352169689Skan 353169689Skan // For tracking the heavyweight monitor the thread is pending on. 
354169689Skan ObjectMonitor* current_pending_monitor() { 355169689Skan return _current_pending_monitor; 356169689Skan } 357169689Skan void set_current_pending_monitor(ObjectMonitor* monitor) { 358169689Skan _current_pending_monitor = monitor; 359169689Skan } 360169689Skan void set_current_pending_monitor_is_from_java(bool from_java) { 361169689Skan _current_pending_monitor_is_from_java = from_java; 362169689Skan } 363169689Skan bool current_pending_monitor_is_from_java() { 364169689Skan return _current_pending_monitor_is_from_java; 365169689Skan } 366169689Skan 367169689Skan // For tracking the ObjectMonitor on which this thread called Object.wait() 368169689Skan ObjectMonitor* current_waiting_monitor() { 369169689Skan return _current_waiting_monitor; 370169689Skan } 371169689Skan void set_current_waiting_monitor(ObjectMonitor* monitor) { 372169689Skan _current_waiting_monitor = monitor; 373169689Skan } 374169689Skan 375169689Skan // GC support 376169689Skan // Apply "f->do_oop" to all root oops in "this". 377169689Skan void oops_do(OopClosure* f); 378169689Skan 379169689Skan // Handles the parallel case for the method below. 380169689Skanprivate: 381169689Skan bool claim_oops_do_par_case(int collection_parity); 382169689Skanpublic: 383169689Skan // Requires that "collection_parity" is that of the current strong roots 384169689Skan // iteration. If "is_par" is false, sets the parity of "this" to 385169689Skan // "collection_parity", and returns "true". If "is_par" is true, 386169689Skan // uses an atomic instruction to set the current threads parity to 387169689Skan // "collection_parity", if it is not already. Returns "true" iff the 388169689Skan // calling thread does the update, this indicates that the calling thread 389169689Skan // has claimed the thread's stack as a root groop in the current 390169689Skan // collection. 
391169689Skan bool claim_oops_do(bool is_par, int collection_parity) { 392169689Skan if (!is_par) { 393169689Skan _oops_do_parity = collection_parity; 394169689Skan return true; 395169689Skan } else { 396169689Skan return claim_oops_do_par_case(collection_parity); 397169689Skan } 398169689Skan } 399169689Skan 400169689Skan // Sweeper support 401169689Skan void nmethods_do(); 402169689Skan 403169689Skan // Fast-locking support 404169689Skan address highest_lock() const { return _highest_lock; } 405169689Skan void update_highest_lock(address base) { if (base > _highest_lock) _highest_lock = base; } 406169689Skan 407169689Skan // Tells if adr belong to this thread. This is used 408169689Skan // for checking if a lock is owned by the running thread. 409169689Skan // Warning: the method can only be used on the running thread 410169689Skan // Fast lock support uses these methods 411169689Skan virtual bool lock_is_in_stack(address adr) const; 412169689Skan virtual bool is_lock_owned(address adr) const; 413169689Skan 414169689Skan // Check if address is in the stack of the thread (not just for locks). 415169689Skan bool is_in_stack(address adr) const; 416169689Skan 417169689Skan // Sets this thread as starting thread. Returns failure if thread 418169689Skan // creation fails due to lack of memory, too many threads etc. 419169689Skan bool set_as_starting_thread(); 420169689Skan 421169689Skan protected: 422169689Skan // OS data associated with the thread 423169689Skan OSThread* _osthread; // Platform-specific thread information 424169689Skan 425169689Skan // Thread local resource area for temporary allocation within the VM 426169689Skan ResourceArea* _resource_area; 427169689Skan 428169689Skan // Thread local handle area for allocation of handles within the VM 429169689Skan HandleArea* _handle_area; 430169689Skan 431169689Skan // Support for stack overflow handling, get_thread, etc. 
432169689Skan address _stack_base; 433169689Skan size_t _stack_size; 434169689Skan uintptr_t _self_raw_id; // used by get_thread (mutable) 435169689Skan int _lgrp_id; 436169689Skan 437169689Skan public: 438169689Skan // Stack overflow support 439169689Skan address stack_base() const { assert(_stack_base != NULL,"Sanity check"); return _stack_base; } 440169689Skan 441169689Skan void set_stack_base(address base) { _stack_base = base; } 442169689Skan size_t stack_size() const { return _stack_size; } 443169689Skan void set_stack_size(size_t size) { _stack_size = size; } 444169689Skan void record_stack_base_and_size(); 445169689Skan 446169689Skan int lgrp_id() const { return _lgrp_id; } 447169689Skan void set_lgrp_id(int value) { _lgrp_id = value; } 448169689Skan 449169689Skan // Printing 450169689Skan void print_on(outputStream* st) const; 451169689Skan void print() const { print_on(tty); } 452169689Skan virtual void print_on_error(outputStream* st, char* buf, int buflen) const; 453169689Skan 454169689Skan // Debug-only code 455169689Skan 456169689Skan#ifdef ASSERT 457169689Skan private: 458169689Skan // Deadlock detection support for Mutex locks. List of locks own by thread. 
459169689Skan Monitor *_owned_locks; 460169689Skan // Mutex::set_owner_implementation is the only place where _owned_locks is modified, 461169689Skan // thus the friendship 462169689Skan friend class Mutex; 463169689Skan friend class Monitor; 464169689Skan 465169689Skan public: 466169689Skan void print_owned_locks_on(outputStream* st) const; 467169689Skan void print_owned_locks() const { print_owned_locks_on(tty); } 468169689Skan Monitor * owned_locks() const { return _owned_locks; } 469169689Skan bool owns_locks() const { return owned_locks() != NULL; } 470169689Skan bool owns_locks_but_compiled_lock() const; 471169689Skan 472169689Skan // Deadlock detection 473169689Skan bool allow_allocation() { return _allow_allocation_count == 0; } 474169689Skan#endif 475169689Skan 476169689Skan void check_for_valid_safepoint_state(bool potential_vm_operation) PRODUCT_RETURN; 477169689Skan 478169689Skan private: 479169689Skan volatile int _jvmti_env_iteration_count; 480169689Skan 481169689Skan public: 482169689Skan void entering_jvmti_env_iteration() { ++_jvmti_env_iteration_count; } 483169689Skan void leaving_jvmti_env_iteration() { --_jvmti_env_iteration_count; } 484169689Skan bool is_inside_jvmti_env_iteration() { return _jvmti_env_iteration_count > 0; } 485169689Skan 486169689Skan // Code generation 487169689Skan static ByteSize exception_file_offset() { return byte_offset_of(Thread, _exception_file ); } 488169689Skan static ByteSize exception_line_offset() { return byte_offset_of(Thread, _exception_line ); } 489169689Skan static ByteSize active_handles_offset() { return byte_offset_of(Thread, _active_handles ); } 490169689Skan 491169689Skan static ByteSize stack_base_offset() { return byte_offset_of(Thread, _stack_base ); } 492169689Skan static ByteSize stack_size_offset() { return byte_offset_of(Thread, _stack_size ); } 493169689Skan static ByteSize omFreeList_offset() { return byte_offset_of(Thread, omFreeList); } 494169689Skan 495169689Skan#define 
TLAB_FIELD_OFFSET(name) \ 496169689Skan static ByteSize tlab_##name##_offset() { return byte_offset_of(Thread, _tlab) + ThreadLocalAllocBuffer::name##_offset(); } 497169689Skan 498169689Skan TLAB_FIELD_OFFSET(start) 499169689Skan TLAB_FIELD_OFFSET(end) 500169689Skan TLAB_FIELD_OFFSET(top) 501169689Skan TLAB_FIELD_OFFSET(pf_top) 502169689Skan TLAB_FIELD_OFFSET(size) // desired_size 503169689Skan TLAB_FIELD_OFFSET(refill_waste_limit) 504169689Skan TLAB_FIELD_OFFSET(number_of_refills) 505169689Skan TLAB_FIELD_OFFSET(fast_refill_waste) 506169689Skan TLAB_FIELD_OFFSET(slow_allocations) 507169689Skan 508169689Skan#undef TLAB_FIELD_OFFSET 509169689Skan 510169689Skan public: 511169689Skan volatile intptr_t _Stalled ; 512169689Skan volatile int _TypeTag ; 513169689Skan ParkEvent * _ParkEvent ; // for synchronized() 514169689Skan ParkEvent * _SleepEvent ; // for Thread.sleep 515169689Skan ParkEvent * _MutexEvent ; // for native internal Mutex/Monitor 516169689Skan ParkEvent * _MuxEvent ; // for low-level muxAcquire-muxRelease 517169689Skan int NativeSyncRecursion ; // diagnostic 518169689Skan 519169689Skan volatile int _OnTrap ; // Resume-at IP delta 520169689Skan jint _hashStateW ; // Marsaglia Shift-XOR thread-local RNG 521169689Skan jint _hashStateX ; // thread-specific hashCode generator state 522169689Skan jint _hashStateY ; 523169689Skan jint _hashStateZ ; 524169689Skan void * _schedctl ; 525169689Skan 526169689Skan intptr_t _ScratchA, _ScratchB ; // Scratch locations for fast-path sync code 527169689Skan static ByteSize ScratchA_offset() { return byte_offset_of(Thread, _ScratchA ); } 528169689Skan static ByteSize ScratchB_offset() { return byte_offset_of(Thread, _ScratchB ); } 529169689Skan 530169689Skan volatile jint rng [4] ; // RNG for spin loop 531169689Skan 532169689Skan // Low-level leaf-lock primitives used to implement synchronization 533169689Skan // and native monitor-mutex infrastructure. 534169689Skan // Not for general synchronization use. 
535169689Skan static void SpinAcquire (volatile int * Lock, const char * Name) ; 536169689Skan static void SpinRelease (volatile int * Lock) ; 537169689Skan static void muxAcquire (volatile intptr_t * Lock, const char * Name) ; 538169689Skan static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ; 539169689Skan static void muxRelease (volatile intptr_t * Lock) ; 540169689Skan 541169689Skan}; 542169689Skan 543169689Skan// Inline implementation of Thread::current() 544169689Skan// Thread::current is "hot" it's called > 128K times in the 1st 500 msecs of 545169689Skan// startup. 546169689Skan// ThreadLocalStorage::thread is warm -- it's called > 16K times in the same 547169689Skan// period. This is inlined in thread_<os_family>.inline.hpp. 548169689Skan 549169689Skaninline Thread* Thread::current() { 550169689Skan#ifdef ASSERT 551169689Skan// This function is very high traffic. Define PARANOID to enable expensive 552169689Skan// asserts. 553169689Skan#ifdef PARANOID 554169689Skan // Signal handler should call ThreadLocalStorage::get_thread_slow() 555169689Skan Thread* t = ThreadLocalStorage::get_thread_slow(); 556169689Skan assert(t != NULL && !t->is_inside_signal_handler(), 557169689Skan "Don't use Thread::current() inside signal handler"); 558169689Skan#endif 559169689Skan#endif 560169689Skan Thread* thread = ThreadLocalStorage::thread(); 561169689Skan assert(thread != NULL, "just checking"); 562169689Skan return thread; 563169689Skan} 564169689Skan 565169689Skan// Name support for threads. non-JavaThread subclasses with multiple 566169689Skan// uniquely named instances should derive from this. 567169689Skanclass NamedThread: public Thread { 568169689Skan friend class VMStructs; 569169689Skan enum { 570169689Skan max_name_len = 64 571169689Skan }; 572169689Skan private: 573169689Skan char* _name; 574169689Skan public: 575169689Skan NamedThread(); 576169689Skan ~NamedThread(); 577169689Skan // May only be called once per thread. 
578169689Skan void set_name(const char* format, ...); 579169689Skan virtual char* name() const { return _name == NULL ? (char*)"Unknown Thread" : _name; } 580169689Skan}; 581169689Skan 582169689Skan// Worker threads are named and have an id of an assigned work. 583169689Skanclass WorkerThread: public NamedThread { 584169689Skanprivate: 585169689Skan uint _id; 586169689Skanpublic: 587169689Skan WorkerThread() : _id(0) { } 588169689Skan void set_id(uint work_id) { _id = work_id; } 589169689Skan uint id() const { return _id; } 590169689Skan}; 591169689Skan 592169689Skan// A single WatcherThread is used for simulating timer interrupts. 593169689Skanclass WatcherThread: public Thread { 594169689Skan friend class VMStructs; 595169689Skan public: 596169689Skan virtual void run(); 597169689Skan 598169689Skan private: 599169689Skan static WatcherThread* _watcher_thread; 600169689Skan 601169689Skan static bool _should_terminate; 602169689Skan public: 603169689Skan enum SomeConstants { 604169689Skan delay_interval = 10 // interrupt delay in milliseconds 605169689Skan }; 606169689Skan 607169689Skan // Constructor 608169689Skan WatcherThread(); 609169689Skan 610169689Skan // Tester 611169689Skan bool is_Watcher_thread() const { return true; } 612169689Skan 613169689Skan // Printing 614169689Skan char* name() const { return (char*)"VM Periodic Task Thread"; } 615169689Skan void print_on(outputStream* st) const; 616169689Skan void print() const { print_on(tty); } 617169689Skan 618169689Skan // Returns the single instance of WatcherThread 619169689Skan static WatcherThread* watcher_thread() { return _watcher_thread; } 620169689Skan 621169689Skan // Create and start the single instance of WatcherThread, or stop it on shutdown 622169689Skan static void start(); 623169689Skan static void stop(); 624169689Skan}; 625169689Skan 626169689Skan 627169689Skanclass CompilerThread; 628169689Skan 629169689Skantypedef void (*ThreadFunction)(JavaThread*, TRAPS); 630169689Skan 631169689Skanclass 
JavaThread: public Thread {
  friend class VMStructs;
 private:
  JavaThread*    _next;       // The next thread in the Threads list
  oop            _threadObj;  // The Java level thread object

#ifdef ASSERT
 private:
  int _java_call_counter;     // depth of nested JavaCallWrapper activations (debug only)

 public:
  int  java_call_counter()    { return _java_call_counter; }
  void inc_java_call_counter() { _java_call_counter++; }
  void dec_java_call_counter() {
    assert(_java_call_counter > 0, "Invalid nesting of JavaCallWrapper");
    _java_call_counter--;
  }
 private:  // restore original namespace restriction
#endif  // ifdef ASSERT

#ifndef PRODUCT
 public:
  enum {
    jump_ring_buffer_size = 16  // number of entries kept in the _jmp_ring debug trace
  };
 private:  // restore original namespace restriction
#endif

  JavaFrameAnchor _anchor;    // Encapsulation of current java frame and its state

  ThreadFunction _entry_point;  // native entry function run() dispatches to

  JNIEnv        _jni_environment;  // per-thread JNI environment; JNIEnv* maps back to this thread

  // Deopt support
  DeoptResourceMark* _deopt_mark;  // Holds special ResourceMark for deoptimization

  intptr_t* _must_deopt_id;  // id of frame that needs to be deopted once we
                             // transition out of native

  vframeArray* _vframe_array_head;  // Holds the heap of the active vframeArrays
  vframeArray* _vframe_array_last;  // Holds last vFrameArray we popped
  // Because deoptimization is lazy we must save jvmti requests to set locals
  // in compiled frames until we deoptimize and we have an interpreter frame.
  // This holds the pointer to array (yeah like there might be more than one) of
  // description of compiled vframes that have locals that need to be updated.
  GrowableArray<jvmtiDeferredLocalVariableSet*>* _deferred_locals_updates;

  // Handshake value for fixing 6243940. We need a place for the i2c
  // adapter to store the callee methodOop. This value is NEVER live
  // across a gc point so it does NOT have to be gc'd
  // The handshake is open ended since we can't be certain that it will
  // be NULLed. This is because we rarely ever see the race and end up
  // in handle_wrong_method which is the backend of the handshake. See
  // code in i2c adapters and handle_wrong_method.

  methodOop     _callee_target;

  // Oop results of VM runtime calls
  oop           _vm_result;    // Used to pass back an oop result into Java code, GC-preserved
  oop           _vm_result_2;  // Used to pass back an oop result into Java code, GC-preserved

  MonitorChunk* _monitor_chunks;  // Contains the off stack monitors
                                  // allocated during deoptimization
                                  // and by JNI_MonitorEnter/Exit

  // Async. requests support
  enum AsyncRequests {
    _no_async_condition = 0,
    _async_exception,
    _async_unsafe_access_error
  };
  AsyncRequests _special_runtime_exit_condition; // Enum indicating pending async. request
  oop           _pending_async_exception;        // exception to be thrown on next async check

  // Safepoint support
 public:  // Expose _thread_state for SafeFetchInt()
  volatile JavaThreadState _thread_state;
 private:
  ThreadSafepointState *_safepoint_state;  // Holds information about a thread during a safepoint
  address               _saved_exception_pc;  // Saved pc of instruction where last implicit exception happened

  // JavaThread termination support
  enum TerminatedTypes {
    _not_terminated = 0xDEAD - 2,  // unusual base value guards against accidental zero-init match
    _thread_exiting,               // JavaThread::exit() has been called for this thread
    _thread_terminated,            // JavaThread is removed from thread list
    _vm_exited                     // JavaThread is still executing native code, but VM is terminated
                                   // only VM_Exit can set _vm_exited
  };

  // In general a JavaThread's _terminated field transitions as follows:
  //
  //   _not_terminated => _thread_exiting => _thread_terminated
  //
  // _vm_exited is a special value to cover the case of a JavaThread
  // executing native code after the VM itself is terminated.
  TerminatedTypes _terminated;
  // suspend/resume support
  volatile bool   _suspend_equivalent;  // Suspend equivalent condition
  jint            _in_deopt_handler;    // count of deoptimization
                                        // handlers thread is in
  volatile bool   _doing_unsafe_access; // Thread may fault due to unsafe access
  bool            _do_not_unlock_if_synchronized; // Do not unlock the receiver of a synchronized method (since it was
                                                  // never locked) when throwing an exception. Used by interpreter only.

  // Flag to mark a JNI thread in the process of attaching - See CR 6404306
  // This flag is never set true other than at construction, and in that case
  // is shortly thereafter set false
  volatile bool _is_attaching;

 public:
  // State of the stack guard pages for this thread.
  enum StackGuardState {
    stack_guard_unused,         // not needed
    stack_guard_yellow_disabled,// disabled (temporarily) after stack overflow
    stack_guard_enabled         // enabled
  };

 private:

  StackGuardState  _stack_guard_state;

  // Compiler exception handling (NOTE: The _exception_oop is *NOT* the same as _pending_exception. It is
  // used to temporarily pass values into and out of the runtime system during exception handling for compiled
  // code)
  volatile oop     _exception_oop;             // Exception thrown in compiled code
  volatile address _exception_pc;              // PC where exception happened
  volatile address _exception_handler_pc;      // PC for handler of exception
  volatile int     _exception_stack_size;      // Size of frame where exception happened

  // support for compilation
  bool _is_compiling;  // is true if a compilation is active in this thread (one compilation per thread possible)

  // support for JNI critical regions
  jint _jni_active_critical;  // count of entries into JNI critical region

  // For deadlock detection.
  int _depth_first_number;

  // JVMTI PopFrame support
  // This is set to popframe_pending to signal that top Java frame should be popped immediately
  int _popframe_condition;

#ifndef PRODUCT
  int _jmp_ring_index;  // next slot to write in the circular jump trace below
  struct {
    // We use intptr_t instead of address so debugger doesn't try and display strings
    intptr_t _target;
    intptr_t _instruction;
    const char* _file;
    int _line;
  } _jmp_ring[ jump_ring_buffer_size ];
#endif /* PRODUCT */

#ifndef SERIALGC
  // Support for G1 barriers

  ObjPtrQueue _satb_mark_queue;  // Thread-local log for SATB barrier.
  // Set of all such queues.
  static SATBMarkQueueSet _satb_mark_queue_set;

  DirtyCardQueue _dirty_card_queue;  // Thread-local log for dirty cards.
  // Set of all such queues.
  static DirtyCardQueueSet _dirty_card_queue_set;

  void flush_barrier_queues();
#endif // !SERIALGC

  friend class VMThread;
  friend class ThreadWaitTransition;
  friend class VM_Exit;

  void initialize();  // Initializes the instance variables

 public:
  // Constructor
  JavaThread(bool is_attaching = false);  // for main thread and JNI attached threads
  JavaThread(ThreadFunction entry_point, size_t stack_size = 0);
  ~JavaThread();

#ifdef ASSERT
  // verify this JavaThread hasn't been published in the Threads::list yet
  void verify_not_published();
#endif

  // JNI function table getter/setter for JVMTI jni function table interception API.
  void set_jni_functions(struct JNINativeInterface_* functionTable) {
    _jni_environment.functions = functionTable;
  }
  struct JNINativeInterface_* get_jni_functions() {
    return (struct JNINativeInterface_ *)_jni_environment.functions;
  }

  // Executes Shutdown.shutdown()
  void invoke_shutdown_hooks();

  // Cleanup on thread exit
  enum ExitType {
    normal_exit,
    jni_detach
  };
  void exit(bool destroy_vm, ExitType exit_type = normal_exit);

  void cleanup_failed_attach_current_thread();

  // Testers
  virtual bool is_Java_thread() const            { return true;  }

  // compilation
  void set_is_compiling(bool f)                  { _is_compiling = f; }
  bool is_compiling() const                      { return _is_compiling; }

  // Thread chain operations
  JavaThread* next() const                       { return _next; }
  void set_next(JavaThread* p)                   { _next = p; }

  // Thread oop. threadObj() can be NULL for initial JavaThread
  // (or for threads attached via JNI)
  oop threadObj() const                          { return _threadObj; }
  void set_threadObj(oop p)                      { _threadObj = p; }

  ThreadPriority java_priority() const;  // Read from threadObj()

  // Prepare thread and add to priority queue. If a priority is
  // not specified, use the priority of the thread object. Threads_lock
  // must be held while this function is called.
  void prepare(jobject jni_thread, ThreadPriority prio=NoPriority);

  void set_saved_exception_pc(address pc)        { _saved_exception_pc = pc; }
  address saved_exception_pc()                   { return _saved_exception_pc; }


  ThreadFunction entry_point() const             { return _entry_point; }

  // Allocates a new Java level thread object for this thread. thread_name may be NULL.
  void allocate_threadObj(Handle thread_group, char* thread_name, bool daemon, TRAPS);

  // Last frame anchor routines

  JavaFrameAnchor* frame_anchor(void)            { return &_anchor; }

  // last_Java_sp
  bool has_last_Java_frame() const               { return _anchor.has_last_Java_frame(); }
  intptr_t* last_Java_sp() const                 { return _anchor.last_Java_sp(); }

  // last_Java_pc

  address last_Java_pc(void)                     { return _anchor.last_Java_pc(); }

  // Safepoint support
  JavaThreadState thread_state() const           { return _thread_state; }
  void set_thread_state(JavaThreadState s)       { _thread_state=s; }
  ThreadSafepointState *safepoint_state() const  { return _safepoint_state; }
  void set_safepoint_state(ThreadSafepointState *state) { _safepoint_state = state; }
  bool is_at_poll_safepoint()                    { return _safepoint_state->is_at_poll_safepoint(); }

  // thread has called JavaThread::exit() or is terminated
  bool is_exiting()                              { return _terminated == _thread_exiting || is_terminated(); }
  // thread is terminated (no longer on the threads list); we compare
  // against the two non-terminated values so that a freed JavaThread
  // will also be considered terminated.
  bool is_terminated()                           { return _terminated != _not_terminated && _terminated != _thread_exiting; }
  void set_terminated(TerminatedTypes t)         { _terminated = t; }
  // special for Threads::remove() which is static:
  void set_terminated_value()                    { _terminated = _thread_terminated; }
  void block_if_vm_exited();

  bool doing_unsafe_access()                     { return _doing_unsafe_access; }
  void set_doing_unsafe_access(bool val)         { _doing_unsafe_access = val; }

  bool do_not_unlock_if_synchronized()           { return _do_not_unlock_if_synchronized; }
  void set_do_not_unlock_if_synchronized(bool val) { _do_not_unlock_if_synchronized = val; }


  // Suspend/resume support for JavaThread

 private:
  void set_ext_suspended()                       { set_suspend_flag (_ext_suspended);  }
  void clear_ext_suspended()                     { clear_suspend_flag(_ext_suspended); }

 public:
  void java_suspend();
  void java_resume();
  int  java_suspend_self();

  // Loops until no pending external suspend remains; self-suspends each time
  // one is found. Must be called by the thread itself.
  void check_and_wait_while_suspended() {
    assert(JavaThread::current() == this, "sanity check");

    bool do_self_suspend;
    do {
      // were we externally suspended while we were waiting?
      do_self_suspend = handle_special_suspend_equivalent_condition();
      if (do_self_suspend) {
        // don't surprise the thread that suspended us by returning
        java_suspend_self();
        set_suspend_equivalent();
      }
    } while (do_self_suspend);
  }
  static void check_safepoint_and_suspend_for_native_trans(JavaThread *thread);
  // Check for async exception in addition to safepoint and suspend request.
  static void check_special_condition_for_native_trans(JavaThread *thread);

  bool is_ext_suspend_completed(bool called_by_wait, int delay, uint32_t *bits);
  bool is_ext_suspend_completed_with_lock(uint32_t *bits) {
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    // Warning: is_ext_suspend_completed() may temporarily drop the
    // SR_lock to allow the thread to reach a stable thread state if
    // it is currently in a transient thread state.
    return is_ext_suspend_completed(false /*!called_by_wait */,
                                    SuspendRetryDelay, bits);
  }

  // We cannot allow wait_for_ext_suspend_completion() to run forever or
  // we could hang. SuspendRetryCount and SuspendRetryDelay are normally
  // passed as the count and delay parameters. Experiments with specific
  // calls to wait_for_ext_suspend_completion() can be done by passing
  // other values in the code. Experiments with all calls can be done
  // via the appropriate -XX options.
  bool wait_for_ext_suspend_completion(int count, int delay, uint32_t *bits);

  void set_external_suspend()     { set_suspend_flag  (_external_suspend); }
  void clear_external_suspend()   { clear_suspend_flag(_external_suspend); }

  void set_deopt_suspend()        { set_suspend_flag  (_deopt_suspend); }
  void clear_deopt_suspend()      { clear_suspend_flag(_deopt_suspend); }
  bool is_deopt_suspend()         { return (_suspend_flags & _deopt_suspend) != 0; }

  bool is_external_suspend() const {
    return (_suspend_flags & _external_suspend) != 0;
  }
  // Whenever a thread transitions from native to vm/java it must suspend
  // if external|deopt suspend is present.
  bool is_suspend_after_native() const {
    return (_suspend_flags & (_external_suspend | _deopt_suspend) ) != 0;
  }

  // external suspend request is completed
  bool is_ext_suspended() const {
    return (_suspend_flags & _ext_suspended) != 0;
  }

  // legacy method that checked for either external suspension or vm suspension
  bool is_any_suspended() const {
    return is_ext_suspended();
  }

  bool is_external_suspend_with_lock() const {
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    return is_external_suspend();
  }

  // Special method to handle a pending external suspend request
  // when a suspend equivalent condition lifts.
  bool handle_special_suspend_equivalent_condition() {
    assert(is_suspend_equivalent(),
      "should only be called in a suspend equivalence condition");
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    bool ret = is_external_suspend();
    if (!ret) {
      // not about to self-suspend so clear suspend equivalence
      clear_suspend_equivalent();
    }
    // implied else:
    // We have a pending external suspend request so we leave the
    // suspend_equivalent flag set until java_suspend_self() sets
    // the ext_suspended flag and clears the suspend_equivalent
    // flag. This ensures that wait_for_ext_suspend_completion()
    // will return consistent values.
    return ret;
  }

  bool is_any_suspended_with_lock() const {
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    return is_any_suspended();
  }
  // utility methods to see if we are doing some kind of suspension
  bool is_being_ext_suspended() const            {
    MutexLockerEx ml(SR_lock(), Mutex::_no_safepoint_check_flag);
    return is_ext_suspended() || is_external_suspend();
  }

  bool is_suspend_equivalent() const             { return _suspend_equivalent; }

  void set_suspend_equivalent()                  { _suspend_equivalent = true; };
  void clear_suspend_equivalent()                { _suspend_equivalent = false; };

  // Thread.stop support
  void send_thread_stop(oop throwable);
  // Atomically fetches and resets the pending async condition for this thread.
  AsyncRequests clear_special_runtime_exit_condition() {
    AsyncRequests x = _special_runtime_exit_condition;
    _special_runtime_exit_condition = _no_async_condition;
    return x;
  }

  // Are any async conditions present?
  bool has_async_condition() { return (_special_runtime_exit_condition != _no_async_condition); }

  void check_and_handle_async_exceptions(bool check_unsafe_error = true);

  // these next two are also used for self-suspension and async exception support
  void handle_special_runtime_exit_condition(bool check_asyncs = true);

  // Return true if JavaThread has an asynchronous condition or
  // if external suspension is requested.
  bool has_special_runtime_exit_condition() {
    // We call is_external_suspend() last since external suspend should
    // be less common. Because we don't use is_external_suspend_with_lock
    // it is possible that we won't see an asynchronous external suspend
    // request that has just gotten started, i.e., SR_lock grabbed but
    // _external_suspend field change either not made yet or not visible
    // yet. However, this is okay because the request is asynchronous and
    // we will see the new flag value the next time through. It's also
    // possible that the external suspend request is dropped after
    // we have checked is_external_suspend(), we will recheck its value
    // under SR_lock in java_suspend_self().
    return (_special_runtime_exit_condition != _no_async_condition) ||
            is_external_suspend() || is_deopt_suspend();
  }

  void set_pending_unsafe_access_error()          { _special_runtime_exit_condition = _async_unsafe_access_error; }

  void set_pending_async_exception(oop e) {
    _pending_async_exception = e;
    _special_runtime_exit_condition = _async_exception;
    set_has_async_exception();
  }

  // Fast-locking support
  bool is_lock_owned(address adr) const;

  // Accessors for vframe array top
  // The linked list of vframe arrays is sorted on sp. This means when we
  // unpack the head must contain the vframe array to unpack.
  void set_vframe_array_head(vframeArray* value) { _vframe_array_head = value; }
  vframeArray* vframe_array_head() const         { return _vframe_array_head;  }

  // Side structure for deferring update of java frame locals until deopt occurs
  GrowableArray<jvmtiDeferredLocalVariableSet*>* deferred_locals() const { return _deferred_locals_updates; }
  void set_deferred_locals(GrowableArray<jvmtiDeferredLocalVariableSet *>* vf) { _deferred_locals_updates = vf; }

  // These only really exist to make debugging deopt problems simpler

  void set_vframe_array_last(vframeArray* value) { _vframe_array_last = value; }
  vframeArray* vframe_array_last() const         { return _vframe_array_last;  }

  // The special resourceMark used during deoptimization

  void set_deopt_mark(DeoptResourceMark* value)  { _deopt_mark = value; }
  DeoptResourceMark* deopt_mark(void)            { return _deopt_mark; }

  intptr_t* must_deopt_id()                      { return _must_deopt_id; }
  void     set_must_deopt_id(intptr_t* id)       { _must_deopt_id = id; }
  void     clear_must_deopt_id()                 { _must_deopt_id = NULL; }

  methodOop  callee_target() const               { return _callee_target; }
  void set_callee_target  (methodOop x)          { _callee_target   = x; }

  // Oop results of vm runtime calls
  oop  vm_result() const                         { return _vm_result; }
  void set_vm_result  (oop x)                    { _vm_result   = x; }

  oop  vm_result_2() const                       { return _vm_result_2; }
  void set_vm_result_2  (oop x)                  { _vm_result_2 = x; }

  // Exception handling for compiled methods
  oop      exception_oop() const                 { return _exception_oop; }
  int      exception_stack_size() const          { return _exception_stack_size; }
  address  exception_pc() const                  { return _exception_pc; }
  address  exception_handler_pc() const          { return _exception_handler_pc; }

  void set_exception_oop(oop o)                  { _exception_oop = o; }
  void set_exception_pc(address a)               { _exception_pc = a; }
  void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
  void set_exception_stack_size(int size)        { _exception_stack_size = size; }

  // Stack overflow support
  inline size_t stack_available(address cur_sp);
  address stack_yellow_zone_base()
    { return (address)(stack_base() - (stack_size() - (stack_red_zone_size() + stack_yellow_zone_size()))); }
  size_t  stack_yellow_zone_size()
    { return StackYellowPages * os::vm_page_size(); }
  address stack_red_zone_base()
    { return (address)(stack_base() - (stack_size() - stack_red_zone_size())); }
  size_t stack_red_zone_size()
    { return StackRedPages * os::vm_page_size(); }
  bool in_stack_yellow_zone(address a)
    { return (a <= stack_yellow_zone_base()) && (a >= stack_red_zone_base()); }
  bool in_stack_red_zone(address a)
    { return (a <= stack_red_zone_base()) && (a >= (address)((intptr_t)stack_base() - stack_size())); }

  void create_stack_guard_pages();
  void remove_stack_guard_pages();

  void enable_stack_yellow_zone();
  void disable_stack_yellow_zone();
  void enable_stack_red_zone();
  void disable_stack_red_zone();

  inline bool stack_yellow_zone_disabled();
  inline bool stack_yellow_zone_enabled();

  // Attempt to reguard the stack after a stack overflow may have occurred.
  // Returns true if (a) guard pages are not needed on this thread, (b) the
  // pages are already guarded, or (c) the pages were successfully reguarded.
  // Returns false if there is not enough stack space to reguard the pages, in
  // which case the caller should unwind a frame and try again. The argument
  // should be the caller's (approximate) sp.
  bool reguard_stack(address cur_sp);
  // Similar to above but see if current stack pointer is out of the guard area
  // and reguard if possible.
  bool reguard_stack(void);

  // Misc. accessors/mutators
  void set_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = true; }
  void clr_do_not_unlock(void)                   { _do_not_unlock_if_synchronized = false; }
  bool do_not_unlock(void)                       { return _do_not_unlock_if_synchronized; }

#ifndef PRODUCT
  void record_jump(address target, address instr, const char* file, int line);
#endif /* PRODUCT */

  // For assembly stub generation
  static ByteSize threadObj_offset()             { return byte_offset_of(JavaThread, _threadObj           ); }
#ifndef PRODUCT
  static ByteSize jmp_ring_index_offset()        { return byte_offset_of(JavaThread, _jmp_ring_index      ); }
  static ByteSize jmp_ring_offset()              { return byte_offset_of(JavaThread, _jmp_ring            ); }
#endif /* PRODUCT */
  static ByteSize jni_environment_offset()       { return byte_offset_of(JavaThread, _jni_environment     ); }
  static ByteSize last_Java_sp_offset()          {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_sp_offset();
  }
  static ByteSize last_Java_pc_offset()          {
    return byte_offset_of(JavaThread, _anchor) + JavaFrameAnchor::last_Java_pc_offset();
  }
  static ByteSize frame_anchor_offset()          {
    return byte_offset_of(JavaThread, _anchor);
  }
  static ByteSize callee_target_offset()         { return byte_offset_of(JavaThread, _callee_target       ); }
  static ByteSize vm_result_offset()             { return byte_offset_of(JavaThread, _vm_result           ); }
  static ByteSize vm_result_2_offset()           { return byte_offset_of(JavaThread, _vm_result_2         ); }
  static ByteSize thread_state_offset()          { return byte_offset_of(JavaThread, _thread_state        ); }
  static ByteSize saved_exception_pc_offset()    { return byte_offset_of(JavaThread, _saved_exception_pc  ); }
  static ByteSize osthread_offset()              { return byte_offset_of(JavaThread, _osthread            ); }
  static ByteSize exception_oop_offset()         { return byte_offset_of(JavaThread, _exception_oop       ); }
  static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
  static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
  static ByteSize exception_stack_size_offset()  { return byte_offset_of(JavaThread, _exception_stack_size); }
  static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
  static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }

  static ByteSize do_not_unlock_if_synchronized_offset() { return byte_offset_of(JavaThread, _do_not_unlock_if_synchronized); }

#ifndef SERIALGC
  static ByteSize satb_mark_queue_offset()       { return byte_offset_of(JavaThread, _satb_mark_queue); }
  static ByteSize dirty_card_queue_offset()      { return byte_offset_of(JavaThread, _dirty_card_queue); }
#endif // !SERIALGC

  // Returns the jni environment for this thread
  JNIEnv* jni_environment()                      { return &_jni_environment; }

  // Recovers the owning JavaThread from a JNIEnv* by subtracting the field
  // offset (the JNIEnv is embedded in the JavaThread).
  static JavaThread* thread_from_jni_environment(JNIEnv* env) {
    JavaThread *thread_from_jni_env = (JavaThread*)((intptr_t)env - in_bytes(jni_environment_offset()));
    // Only return NULL if thread is off the thread list; starting to
    // exit should not return NULL.
    if (thread_from_jni_env->is_terminated()) {
       thread_from_jni_env->block_if_vm_exited();
       return NULL;
    } else {
       return thread_from_jni_env;
    }
  }

  // JNI critical regions. These can nest.
  bool in_critical()    { return _jni_active_critical > 0; }
  void enter_critical() { assert(Thread::current() == this,
                                 "this must be current thread");
                          _jni_active_critical++; }
  void exit_critical()  { assert(Thread::current() == this,
                                 "this must be current thread");
                          _jni_active_critical--;
                          assert(_jni_active_critical >= 0,
                                 "JNI critical nesting problem?"); }

  // For deadlock detection
  int depth_first_number() { return _depth_first_number; }
  void set_depth_first_number(int dfn) { _depth_first_number = dfn; }

 private:
  void set_monitor_chunks(MonitorChunk* monitor_chunks) { _monitor_chunks = monitor_chunks; }

 public:
  MonitorChunk* monitor_chunks() const           { return _monitor_chunks; }
  void add_monitor_chunk(MonitorChunk* chunk);
  void remove_monitor_chunk(MonitorChunk* chunk);
  bool in_deopt_handler() const                  { return _in_deopt_handler > 0; }
  void inc_in_deopt_handler()                    { _in_deopt_handler++; }
  void dec_in_deopt_handler()                    {
    assert(_in_deopt_handler > 0, "mismatched deopt nesting");
    if (_in_deopt_handler > 0) { // robustness
      _in_deopt_handler--;
    }
  }

 private:
  void set_entry_point(ThreadFunction entry_point) { _entry_point = entry_point; }

 public:

  // Frame iteration; calls the function f for all frames on the stack
  void frames_do(void f(frame*, const RegisterMap*));

  // Memory operations
  void oops_do(OopClosure* f);

  // Sweeper operations
  void nmethods_do();

  // Memory management operations
  void gc_epilogue();
  void gc_prologue();

  // Misc. operations
  char* name() const { return (char*)get_thread_name(); }
  void print_on(outputStream* st) const;
  void print() const { print_on(tty); }
  void print_value();
  void print_thread_state_on(outputStream* ) const      PRODUCT_RETURN;
  void print_thread_state() const                       PRODUCT_RETURN;
  void print_on_error(outputStream* st, char* buf, int buflen) const;
  void verify();
  const char* get_thread_name() const;
private:
  // factor out low-level mechanics for use in both normal and error cases
  const char* get_thread_name_string(char* buf = NULL, int buflen = 0) const;
public:
  const char* get_threadgroup_name() const;
  const char* get_parent_name() const;

  // Accessing frames
  frame last_frame() {
    _anchor.make_walkable(this);
    return pd_last_frame();
  }
  javaVFrame* last_java_vframe(RegisterMap* reg_map);

  // Returns method at 'depth' java or native frames down the stack
  // Used for security checks
  klassOop security_get_caller_class(int depth);

  // Print stack trace in external format
  void print_stack_on(outputStream* st);
  void print_stack() { print_stack_on(tty); }

  // Print stack traces in various internal formats
  void trace_stack()                             PRODUCT_RETURN;
  void trace_stack_from(vframe* start_vf)        PRODUCT_RETURN;
  void trace_frames()                            PRODUCT_RETURN;

  // Returns the number of stack frames on the stack
  int depth() const;

  // Function for testing deoptimization
  void deoptimize();
  void make_zombies();

  void deoptimized_wrt_marked_nmethods();

  // Profiling operation (see fprofile.cpp)
 public:
  bool profile_last_Java_frame(frame* fr);

 private:
  ThreadProfiler* _thread_profiler;
 private:
  friend class FlatProfiler;                    // uses both [gs]et_thread_profiler.
  friend class FlatProfilerTask;                // uses get_thread_profiler.
  friend class ThreadProfilerMark;              // uses get_thread_profiler.
  ThreadProfiler* get_thread_profiler()         { return _thread_profiler; }
  // Installs a new profiler and returns the previous one (ownership per caller).
  ThreadProfiler* set_thread_profiler(ThreadProfiler* tp) {
    ThreadProfiler* result = _thread_profiler;
    _thread_profiler = tp;
    return result;
  }

  // Static operations
 public:
  // Returns the running thread as a JavaThread
  static inline JavaThread* current();

  // Returns the active Java thread. Do not use this if you know you are calling
  // from a JavaThread, as it's slower than JavaThread::current. If called from
  // the VMThread, it also returns the JavaThread that instigated the VMThread's
  // operation. You may not want that either.
  static JavaThread* active();

  inline CompilerThread* as_CompilerThread();

 public:
  virtual void run();
  void thread_main_inner();

 private:
  // PRIVILEGED STACK
  PrivilegedElement*  _privileged_stack_top;
  GrowableArray<oop>* _array_for_gc;
 public:

  // Returns the privileged_stack information.
  PrivilegedElement* privileged_stack_top() const       { return _privileged_stack_top; }
  void set_privileged_stack_top(PrivilegedElement *e)   { _privileged_stack_top = e; }
  void register_array_for_gc(GrowableArray<oop>* array) { _array_for_gc = array; }

 public:
  // Thread local information maintained by JVMTI.
1347169689Skan void set_jvmti_thread_state(JvmtiThreadState *value) { _jvmti_thread_state = value; } 1348169689Skan JvmtiThreadState *jvmti_thread_state() const { return _jvmti_thread_state; } 1349169689Skan static ByteSize jvmti_thread_state_offset() { return byte_offset_of(JavaThread, _jvmti_thread_state); } 1350169689Skan void set_jvmti_get_loaded_classes_closure(JvmtiGetLoadedClassesClosure* value) { _jvmti_get_loaded_classes_closure = value; } 1351169689Skan JvmtiGetLoadedClassesClosure* get_jvmti_get_loaded_classes_closure() const { return _jvmti_get_loaded_classes_closure; } 1352169689Skan 1353169689Skan // JVMTI PopFrame support 1354169689Skan // Setting and clearing popframe_condition 1355169689Skan // All of these enumerated values are bits. popframe_pending 1356169689Skan // indicates that a PopFrame() has been requested and not yet been 1357169689Skan // completed. popframe_processing indicates that that PopFrame() is in 1358169689Skan // the process of being completed. popframe_force_deopt_reexecution_bit 1359169689Skan // indicates that special handling is required when returning to a 1360169689Skan // deoptimized caller. 
1361169689Skan enum PopCondition { 1362169689Skan popframe_inactive = 0x00, 1363169689Skan popframe_pending_bit = 0x01, 1364169689Skan popframe_processing_bit = 0x02, 1365169689Skan popframe_force_deopt_reexecution_bit = 0x04 1366169689Skan }; 1367169689Skan PopCondition popframe_condition() { return (PopCondition) _popframe_condition; } 1368169689Skan void set_popframe_condition(PopCondition c) { _popframe_condition = c; } 1369169689Skan void set_popframe_condition_bit(PopCondition c) { _popframe_condition |= c; } 1370169689Skan void clear_popframe_condition() { _popframe_condition = popframe_inactive; } 1371169689Skan static ByteSize popframe_condition_offset() { return byte_offset_of(JavaThread, _popframe_condition); } 1372169689Skan bool has_pending_popframe() { return (popframe_condition() & popframe_pending_bit) != 0; } 1373169689Skan bool popframe_forcing_deopt_reexecution() { return (popframe_condition() & popframe_force_deopt_reexecution_bit) != 0; } 1374169689Skan void clear_popframe_forcing_deopt_reexecution() { _popframe_condition &= ~popframe_force_deopt_reexecution_bit; } 1375169689Skan#ifdef CC_INTERP 1376169689Skan bool pop_frame_pending(void) { return ((_popframe_condition & popframe_pending_bit) != 0); } 1377169689Skan void clr_pop_frame_pending(void) { _popframe_condition = popframe_inactive; } 1378169689Skan bool pop_frame_in_process(void) { return ((_popframe_condition & popframe_processing_bit) != 0); } 1379169689Skan void set_pop_frame_in_process(void) { _popframe_condition |= popframe_processing_bit; } 1380169689Skan void clr_pop_frame_in_process(void) { _popframe_condition &= ~popframe_processing_bit; } 1381169689Skan#endif 1382169689Skan 1383169689Skan private: 1384169689Skan // Saved incoming arguments to popped frame. 1385169689Skan // Used only when popped interpreted frame returns to deoptimized frame. 
1386169689Skan void* _popframe_preserved_args; 1387169689Skan int _popframe_preserved_args_size; 1388169689Skan 1389169689Skan public: 1390169689Skan void popframe_preserve_args(ByteSize size_in_bytes, void* start); 1391169689Skan void* popframe_preserved_args(); 1392169689Skan ByteSize popframe_preserved_args_size(); 1393169689Skan WordSize popframe_preserved_args_size_in_words(); 1394169689Skan void popframe_free_preserved_args(); 1395169689Skan 1396169689Skan 1397169689Skan private: 1398169689Skan JvmtiThreadState *_jvmti_thread_state; 1399169689Skan JvmtiGetLoadedClassesClosure* _jvmti_get_loaded_classes_closure; 1400169689Skan 1401169689Skan // Used by the interpreter in fullspeed mode for frame pop, method 1402169689Skan // entry, method exit and single stepping support. This field is 1403169689Skan // only set to non-zero by the VM_EnterInterpOnlyMode VM operation. 1404169689Skan // It can be set to zero asynchronously (i.e., without a VM operation 1405169689Skan // or a lock) so we have to be very careful. 1406169689Skan int _interp_only_mode; 1407169689Skan 1408169689Skan public: 1409169689Skan // used by the interpreter for fullspeed debugging support (see above) 1410169689Skan static ByteSize interp_only_mode_offset() { return byte_offset_of(JavaThread, _interp_only_mode); } 1411169689Skan bool is_interp_only_mode() { return (_interp_only_mode != 0); } 1412169689Skan int get_interp_only_mode() { return _interp_only_mode; } 1413169689Skan void increment_interp_only_mode() { ++_interp_only_mode; } 1414169689Skan void decrement_interp_only_mode() { --_interp_only_mode; } 1415169689Skan 1416169689Skan private: 1417169689Skan ThreadStatistics *_thread_stat; 1418169689Skan 1419169689Skan public: 1420169689Skan ThreadStatistics* get_thread_stat() const { return _thread_stat; } 1421169689Skan 1422169689Skan // Return a blocker object for which this thread is blocked parking. 
1423169689Skan oop current_park_blocker(); 1424169689Skan 1425169689Skan private: 1426169689Skan static size_t _stack_size_at_create; 1427169689Skan 1428169689Skan public: 1429169689Skan static inline size_t stack_size_at_create(void) { 1430169689Skan return _stack_size_at_create; 1431169689Skan } 1432169689Skan static inline void set_stack_size_at_create(size_t value) { 1433169689Skan _stack_size_at_create = value; 1434169689Skan } 1435169689Skan 1436169689Skan#ifndef SERIALGC 1437169689Skan // SATB marking queue support 1438169689Skan ObjPtrQueue& satb_mark_queue() { return _satb_mark_queue; } 1439169689Skan static SATBMarkQueueSet& satb_mark_queue_set() { 1440169689Skan return _satb_mark_queue_set; 1441169689Skan } 1442169689Skan 1443169689Skan // Dirty card queue support 1444169689Skan DirtyCardQueue& dirty_card_queue() { return _dirty_card_queue; } 1445169689Skan static DirtyCardQueueSet& dirty_card_queue_set() { 1446169689Skan return _dirty_card_queue_set; 1447169689Skan } 1448169689Skan#endif // !SERIALGC 1449169689Skan 1450169689Skan // Machine dependent stuff 1451169689Skan #include "incls/_thread_pd.hpp.incl" 1452169689Skan 1453169689Skan public: 1454169689Skan void set_blocked_on_compilation(bool value) { 1455169689Skan _blocked_on_compilation = value; 1456169689Skan } 1457169689Skan 1458169689Skan bool blocked_on_compilation() { 1459169689Skan return _blocked_on_compilation; 1460169689Skan } 1461169689Skan protected: 1462169689Skan bool _blocked_on_compilation; 1463169689Skan 1464169689Skan 1465169689Skan // JSR166 per-thread parker 1466169689Skanprivate: 1467169689Skan Parker* _parker; 1468169689Skanpublic: 1469169689Skan Parker* parker() { return _parker; } 1470169689Skan 1471169689Skan // Biased locking support 1472169689Skanprivate: 1473169689Skan GrowableArray<MonitorInfo*>* _cached_monitor_info; 1474169689Skanpublic: 1475169689Skan GrowableArray<MonitorInfo*>* cached_monitor_info() { return _cached_monitor_info; } 1476169689Skan void 
set_cached_monitor_info(GrowableArray<MonitorInfo*>* info) { _cached_monitor_info = info; } 1477169689Skan 1478169689Skan // clearing/querying jni attach status 1479169689Skan bool is_attaching() const { return _is_attaching; } 1480169689Skan void set_attached() { _is_attaching = false; OrderAccess::fence(); } 1481169689Skanprivate: 1482169689Skan // This field is used to determine if a thread has claimed 1483169689Skan // a par_id: it is -1 if the thread has not claimed a par_id; 1484169689Skan // otherwise its value is the par_id that has been claimed. 1485169689Skan int _claimed_par_id; 1486169689Skanpublic: 1487169689Skan int get_claimed_par_id() { return _claimed_par_id; } 1488169689Skan void set_claimed_par_id(int id) { _claimed_par_id = id;} 1489169689Skan}; 1490169689Skan 1491169689Skan// Inline implementation of JavaThread::current 1492169689Skaninline JavaThread* JavaThread::current() { 1493169689Skan Thread* thread = ThreadLocalStorage::thread(); 1494169689Skan assert(thread != NULL && thread->is_Java_thread(), "just checking"); 1495169689Skan return (JavaThread*)thread; 1496169689Skan} 1497169689Skan 1498169689Skaninline CompilerThread* JavaThread::as_CompilerThread() { 1499169689Skan assert(is_Compiler_thread(), "just checking"); 1500169689Skan return (CompilerThread*)this; 1501169689Skan} 1502169689Skan 1503169689Skaninline bool JavaThread::stack_yellow_zone_disabled() { 1504169689Skan return _stack_guard_state == stack_guard_yellow_disabled; 1505169689Skan} 1506169689Skan 1507169689Skaninline bool JavaThread::stack_yellow_zone_enabled() { 1508169689Skan#ifdef ASSERT 1509169689Skan if (os::uses_stack_guard_pages()) { 1510169689Skan assert(_stack_guard_state != stack_guard_unused, "guard pages must be in use"); 1511169689Skan } 1512169689Skan#endif 1513169689Skan return _stack_guard_state == stack_guard_enabled; 1514169689Skan} 1515169689Skan 1516169689Skaninline size_t JavaThread::stack_available(address cur_sp) { 1517169689Skan // This code assumes 
java stacks grow down 1518169689Skan address low_addr; // Limit on the address for deepest stack depth 1519169689Skan if ( _stack_guard_state == stack_guard_unused) { 1520169689Skan low_addr = stack_base() - stack_size(); 1521169689Skan } else { 1522169689Skan low_addr = stack_yellow_zone_base(); 1523169689Skan } 1524169689Skan return cur_sp > low_addr ? cur_sp - low_addr : 0; 1525169689Skan} 1526169689Skan 1527169689Skan// A JavaThread for low memory detection support 1528169689Skanclass LowMemoryDetectorThread : public JavaThread { 1529169689Skan friend class VMStructs; 1530169689Skanpublic: 1531169689Skan LowMemoryDetectorThread(ThreadFunction entry_point) : JavaThread(entry_point) {}; 1532169689Skan 1533169689Skan // Hide this thread from external view. 1534169689Skan bool is_hidden_from_external_view() const { return true; } 1535169689Skan}; 1536169689Skan 1537169689Skan// A thread used for Compilation. 1538169689Skanclass CompilerThread : public JavaThread { 1539169689Skan friend class VMStructs; 1540169689Skan private: 1541169689Skan CompilerCounters* _counters; 1542169689Skan 1543169689Skan ciEnv* _env; 1544169689Skan CompileLog* _log; 1545169689Skan CompileTask* _task; 1546169689Skan CompileQueue* _queue; 1547169689Skan 1548169689Skan public: 1549169689Skan 1550169689Skan static CompilerThread* current(); 1551169689Skan 1552169689Skan CompilerThread(CompileQueue* queue, CompilerCounters* counters); 1553169689Skan 1554169689Skan bool is_Compiler_thread() const { return true; } 1555169689Skan // Hide this compiler thread from external view. 1556169689Skan bool is_hidden_from_external_view() const { return true; } 1557169689Skan 1558169689Skan CompileQueue* queue() { return _queue; } 1559169689Skan CompilerCounters* counters() { return _counters; } 1560169689Skan 1561169689Skan // Get/set the thread's compilation environment. 
1562169689Skan ciEnv* env() { return _env; } 1563169689Skan void set_env(ciEnv* env) { _env = env; } 1564169689Skan 1565169689Skan // Get/set the thread's logging information 1566169689Skan CompileLog* log() { return _log; } 1567169689Skan void init_log(CompileLog* log) { 1568169689Skan // Set once, for good. 1569169689Skan assert(_log == NULL, "set only once"); 1570169689Skan _log = log; 1571169689Skan } 1572169689Skan 1573169689Skan#ifndef PRODUCT 1574169689Skanprivate: 1575169689Skan IdealGraphPrinter *_ideal_graph_printer; 1576169689Skanpublic: 1577169689Skan IdealGraphPrinter *ideal_graph_printer() { return _ideal_graph_printer; } 1578169689Skan void set_ideal_graph_printer(IdealGraphPrinter *n) { _ideal_graph_printer = n; } 1579169689Skan#endif 1580169689Skan 1581169689Skan // Get/set the thread's current task 1582169689Skan CompileTask* task() { return _task; } 1583169689Skan void set_task(CompileTask* task) { _task = task; } 1584169689Skan}; 1585169689Skan 1586169689Skaninline CompilerThread* CompilerThread::current() { 1587169689Skan return JavaThread::current()->as_CompilerThread(); 1588169689Skan} 1589169689Skan 1590169689Skan 1591169689Skan// The active thread queue. It also keeps track of the current used 1592169689Skan// thread priorities. 
class Threads: AllStatic {
  friend class VMStructs;
 private:
  // Head of the singly-linked active thread list and bookkeeping counters.
  static JavaThread* _thread_list;
  static int         _number_of_threads;
  static int         _number_of_non_daemon_threads;
  static int         _return_code;

 public:
  // Thread management
  // force_daemon is a concession to JNI, where we may need to add a
  // thread to the thread list before allocating its thread object
  static void add(JavaThread* p, bool force_daemon = false);
  static void remove(JavaThread* p);
  static bool includes(JavaThread* p);
  static JavaThread* first() { return _thread_list; }
  static void threads_do(ThreadClosure* tc);

  // Initializes the vm and creates the vm thread
  static jint create_vm(JavaVMInitArgs* args, bool* canTryAgain);
  static void convert_vm_init_libraries_to_agents();
  static void create_vm_init_libraries();
  static void create_vm_init_agents();
  static void shutdown_vm_agents();
  static bool destroy_vm();
  // Supported VM versions via JNI
  // Includes JNI_VERSION_1_1
  static jboolean is_supported_jni_version_including_1_1(jint version);
  // Does not include JNI_VERSION_1_1
  static jboolean is_supported_jni_version(jint version);

  // Garbage collection
  static void follow_other_roots(void f(oop*));

  // Apply "f->do_oop" to all root oops in all threads.
  // This version may only be called by sequential code.
  static void oops_do(OopClosure* f);
  // This version may be called by sequential or parallel code.
  static void possibly_parallel_oops_do(OopClosure* f);
  // This creates a list of GCTasks, one per thread.
  static void create_thread_roots_tasks(GCTaskQueue* q);
  // This creates a list of GCTasks, one per thread, for marking objects.
  static void create_thread_roots_marking_tasks(GCTaskQueue* q);

  // Apply "f->do_oop" to roots in all threads that
  // are part of compiled frames
  static void compiled_frame_oops_do(OopClosure* f);

  static void convert_hcode_pointers();
  static void restore_hcode_pointers();

  // Sweeper
  static void nmethods_do();

  static void gc_epilogue();
  static void gc_prologue();

  // Verification
  static void verify();
  static void print_on(outputStream* st, bool print_stacks, bool internal_format, bool print_concurrent_locks);
  static void print(bool print_stacks, bool internal_format) {
    // this function is only used by debug.cpp
    print_on(tty, print_stacks, internal_format, false /* no concurrent lock printed */);
  }
  static void print_on_error(outputStream* st, Thread* current, char* buf, int buflen);

  // Get Java threads that are waiting to enter a monitor. If doLock
  // is true, then Threads_lock is grabbed as needed. Otherwise, the
  // VM needs to be at a safepoint.
  static GrowableArray<JavaThread*>* get_pending_threads(int count,
    address monitor, bool doLock);

  // Get owning Java thread from the monitor's owner field. If doLock
  // is true, then Threads_lock is grabbed as needed. Otherwise, the
  // VM needs to be at a safepoint.
  static JavaThread *owning_thread_from_monitor_owner(address owner,
    bool doLock);

  // Number of threads on the active threads list
  static int number_of_threads()            { return _number_of_threads; }
  // Number of non-daemon threads on the active threads list
  static int number_of_non_daemon_threads() { return _number_of_non_daemon_threads; }

  // Deoptimizes all frames tied to marked nmethods
  static void deoptimized_wrt_marked_nmethods();

};


// Thread iterator: abstract visitor applied to each thread by threads_do().
class ThreadClosure: public StackObj {
 public:
  virtual void do_thread(Thread* thread) = 0;
};

// RAII guard: marks the given thread as being inside a signal handler for
// the lifetime of the mark.  A NULL thread is tolerated (no-op).
class SignalHandlerMark: public StackObj {
private:
  Thread* _thread;
public:
  SignalHandlerMark(Thread* t) {
    _thread = t;
    if (_thread) _thread->enter_signal_handler();
  }
  ~SignalHandlerMark() {
    if (_thread) _thread->leave_signal_handler();
    _thread = NULL;
  }
};

// ParkEvents are type-stable and immortal.
//
// Lifecycle: Once a ParkEvent is associated with a thread that ParkEvent remains
// associated with the thread for the thread's entire lifetime - the relationship is
// stable. A thread will be associated at most one ParkEvent. When the thread
// expires, the ParkEvent moves to the EventFreeList. New threads attempt to allocate from
// the EventFreeList before creating a new Event.
// Type-stability frees us from
// worrying about stale Event or Thread references in the objectMonitor subsystem.
// (A reference to ParkEvent is always valid, even though the event may no longer be associated
// with the desired or expected thread. A key aspect of this design is that the callers of
// park, unpark, etc must tolerate stale references and spurious wakeups).
//
// Only the "associated" thread can block (park) on the ParkEvent, although
// any other thread can unpark a reachable parkevent. Park() is allowed to
// return spuriously. In fact park-unpark is really just an optimization to
// avoid unbounded spinning and surrender the CPU to be a polite system citizen.
// A degenerate albeit "impolite" park-unpark implementation could simply return.
// See http://blogs.sun.com/dave for more details.
//
// Eventually I'd like to eliminate Events and ObjectWaiters, both of which serve as
// thread proxies, and simply make the THREAD structure type-stable and persistent.
// Currently, we unpark events associated with threads, but ideally we'd just
// unpark threads.
//
// The base-class, PlatformEvent, is platform-specific while the ParkEvent is
// platform-independent. PlatformEvent provides park(), unpark(), etc., and
// is abstract -- that is, a PlatformEvent should never be instantiated except
// as part of a ParkEvent.
// Equivalently we could have defined a platform-independent base-class that
// exported Allocate(), Release(), etc. The platform-specific class would extend
// that base-class, adding park(), unpark(), etc.
//
// A word of caution: The JVM uses 2 very similar constructs:
// 1. ParkEvent are used for Java-level "monitor" synchronization.
// 2. Parkers are used by JSR166-JUC park-unpark.
//
// We'll want to eventually merge these redundant facilities and use ParkEvent.


class ParkEvent : public os::PlatformEvent {
  private:
    // Link on the global EventFreeList when not associated with a thread.
    ParkEvent * FreeNext ;

    // Current association
    Thread * AssociatedWith ;
    intptr_t RawThreadIdentity ;        // LWPID etc
    volatile int Incarnation ;

    // diagnostic : keep track of last thread to wake this thread.
    // this is useful for construction of dependency graphs.
    void * LastWaker ;

  public:
    // MCS-CLH list linkage and Native Mutex/Monitor
    ParkEvent * volatile ListNext ;
    ParkEvent * volatile ListPrev ;
    volatile intptr_t OnList ;
    volatile int TState ;
    volatile int Notified ;             // for native monitor construct
    volatile int IsWaiting ;            // Enqueued on WaitSet


  private:
    // Global free list and its spin-lock word, shared by all threads.
    static ParkEvent * volatile FreeList ;
    static volatile int ListLock ;

    // It's prudent to mark the dtor as "private"
    // ensuring that it's not visible outside the package.
    // Unfortunately gcc warns about such usage, so
    // we revert to the less desirable "protected" visibility.
    // The other compilers accept private dtors.

  protected:        // Ensure dtor is never invoked
    // ParkEvents are immortal; destroying one violates the design invariant.
    ~ParkEvent() { guarantee (0, "invariant") ; }

    ParkEvent() : PlatformEvent() {
       AssociatedWith = NULL ;
       FreeNext       = NULL ;
       ListNext       = NULL ;
       ListPrev       = NULL ;
       OnList         = 0 ;
       TState         = 0 ;
       Notified       = 0 ;
       IsWaiting      = 0 ;
    }

    // We use placement-new to force ParkEvent instances to be
    // aligned on 256-byte address boundaries. This ensures that the least
    // significant byte of a ParkEvent address is always 0.

    void * operator new (size_t sz) ;
    void operator delete (void * a) ;

  public:
    // Allocation goes through the free list; Release returns an event to it.
    static ParkEvent * Allocate (Thread * t) ;
    static void Release (ParkEvent * e) ;
} ;