// vm_operations.hpp revision 2062:3582bf76420e
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_VM_OPERATIONS_HPP
#define SHARE_VM_RUNTIME_VM_OPERATIONS_HPP

#include "classfile/javaClasses.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/thread.hpp"
#include "utilities/top.hpp"

// The following classes are used for operations
// initiated by a Java thread but that must
// take place in the VMThread.
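//
// Example of typical usage (a sketch, not part of this header): a JavaThread
// builds an operation and hands it to the VM thread, e.g.
//
//   VM_ForceSafepoint op;          // any VM_Operation subclass declared below
//   VMThread::execute(&op);        // VMThread is declared in runtime/vmThread.hpp;
//                                  // blocks until the operation has been evaluated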

#define VM_OP_ENUM(type)   VMOp_##type,

// Note: When a new VM_XXX operation is added, add 'XXX' to the template table
// below (an expansion sketch follows the table).
#define VM_OPS_DO(template)                       \
  template(Dummy)                                 \
  template(ThreadStop)                            \
  template(ThreadDump)                            \
  template(PrintThreads)                          \
  template(FindDeadlocks)                         \
  template(ForceSafepoint)                        \
  template(ForceAsyncSafepoint)                   \
  template(Deoptimize)                            \
  template(DeoptimizeFrame)                       \
  template(DeoptimizeAll)                         \
  template(ZombieAll)                             \
  template(UnlinkSymbols)                         \
  template(HandleFullCodeCache)                   \
  template(Verify)                                \
  template(PrintJNI)                              \
  template(HeapDumper)                            \
  template(DeoptimizeTheWorld)                    \
  template(GC_HeapInspection)                     \
  template(GenCollectFull)                        \
  template(GenCollectFullConcurrent)              \
  template(GenCollectForAllocation)               \
  template(GenCollectForPermanentAllocation)      \
  template(ParallelGCFailedAllocation)            \
  template(ParallelGCFailedPermanentAllocation)   \
  template(ParallelGCSystemGC)                    \
  template(CGC_Operation)                         \
  template(CMS_Initial_Mark)                      \
  template(CMS_Final_Remark)                      \
  template(G1CollectFull)                         \
  template(G1CollectForAllocation)                \
  template(G1IncCollectionPause)                  \
  template(EnableBiasedLocking)                   \
  template(RevokeBias)                            \
  template(BulkRevokeBias)                        \
  template(PopulateDumpSharedSpace)               \
  template(JNIFunctionTableCopier)                \
  template(RedefineClasses)                       \
  template(GetOwnedMonitorInfo)                   \
  template(GetObjectMonitorUsage)                 \
  template(GetCurrentContendedMonitor)            \
  template(GetStackTrace)                         \
  template(GetMultipleStackTraces)                \
  template(GetAllStackTraces)                     \
  template(GetThreadListStackTraces)              \
  template(GetFrameCount)                         \
  template(GetFrameLocation)                      \
  template(ChangeBreakpoints)                     \
  template(GetOrSetLocal)                         \
  template(GetCurrentLocation)                    \
  template(EnterInterpOnlyMode)                   \
  template(ChangeSingleStep)                      \
  template(HeapWalkOperation)                     \
  template(HeapIterateOperation)                  \
  template(ReportJavaOutOfMemory)                 \
  template(Exit)                                  \

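
// For reference, VM_OPS_DO(VM_OP_ENUM) expands each entry of the table above
// into an enumerator of the form VMOp_<name>, i.e. roughly (sketch):
//
//   VMOp_Dummy,
//   VMOp_ThreadStop,
//   ...
//   VMOp_Exit,
//
// which is how the VMOp_Type enum below gets its values.
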
class VM_Operation: public CHeapObj {
 public:
  enum Mode {
    _safepoint,       // blocking,        safepoint, vm_op C-heap allocated
    _no_safepoint,    // blocking,     no safepoint, vm_op C-heap allocated
    _concurrent,      // non-blocking, no safepoint, vm_op C-heap allocated
    _async_safepoint  // non-blocking,    safepoint, vm_op C-heap allocated
  };

  enum VMOp_Type {
    VM_OPS_DO(VM_OP_ENUM)
    VMOp_Terminating
  };

 private:
  Thread*         _calling_thread;
  ThreadPriority  _priority;
  long            _timestamp;
  VM_Operation*   _next;
  VM_Operation*   _prev;

  // The VM operation name array
  static const char* _names[];

 public:
  VM_Operation()  { _calling_thread = NULL; _next = NULL; _prev = NULL; }
  virtual ~VM_Operation() {}

  // VM operation support (used by VM thread)
  Thread* calling_thread() const                 { return _calling_thread; }
  ThreadPriority priority()                      { return _priority; }
  void set_calling_thread(Thread* thread, ThreadPriority priority);

  long timestamp() const              { return _timestamp; }
  void set_timestamp(long timestamp)  { _timestamp = timestamp; }

  // Called by the VM thread - in turn invokes doit(). Do not override this.
  void evaluate();

  // evaluate() is called by the VMThread and in turn calls doit().
  // If the thread invoking VMThread::execute(VM_Operation*) is a JavaThread,
  // doit_prologue() is called in that thread before transferring control to
  // the VMThread.
  // If doit_prologue() returns true the VM operation will proceed, and
  // doit_epilogue() will be called by the JavaThread once the VM operation
  // completes. If doit_prologue() returns false the VM operation is cancelled.
  // (A sketch of a subclass wiring up these hooks follows this class.)
  virtual void doit()                            = 0;
  virtual bool doit_prologue()                   { return true; };
  virtual void doit_epilogue()                   {}; // Note: Not called if mode is: _concurrent

  // Type test
  virtual bool is_methodCompiler() const         { return false; }

  // Linking
  VM_Operation *next() const                     { return _next; }
  VM_Operation *prev() const                     { return _prev; }
  void set_next(VM_Operation *next)              { _next = next; }
  void set_prev(VM_Operation *prev)              { _prev = prev; }

  // Configuration. Override these appropriately in subclasses.
  virtual VMOp_Type type() const = 0;
  virtual Mode evaluation_mode() const            { return _safepoint; }
  virtual bool allow_nested_vm_operations() const { return false; }
  virtual bool is_cheap_allocated() const         { return false; }
  virtual void oops_do(OopClosure* f)              { /* do nothing */ };

  // CAUTION: <don't hang yourself with the following rope>
  // If you override these methods, make sure that the evaluation
  // of these methods is race-free and non-blocking, since these
  // methods may be evaluated either by the mutators or by the
  // vm thread, either concurrently with mutators or with the mutators
  // stopped. In other words, taking locks is verboten, and if there
  // are any races in evaluating the conditions, they'd better be benign.
  virtual bool evaluate_at_safepoint() const {
    return evaluation_mode() == _safepoint  ||
           evaluation_mode() == _async_safepoint;
  }
  virtual bool evaluate_concurrently() const {
    return evaluation_mode() == _concurrent ||
           evaluation_mode() == _async_safepoint;
  }

  // Debugging
  void print_on_error(outputStream* st) const;
  const char* name() const { return _names[type()]; }
  static const char* name(int type) {
    assert(type >= 0 && type < VMOp_Terminating, "invalid VM operation type");
    return _names[type];
  }
#ifndef PRODUCT
  void print_on(outputStream* st) const { print_on_error(st); }
#endif
};
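
// Example (sketch, not part of this header): a minimal subclass wiring up the
// doit_prologue()/doit()/doit_epilogue() hooks described above. The name
// VM_MySafepointOp is purely illustrative.
//
//   class VM_MySafepointOp: public VM_Operation {
//    public:
//     VMOp_Type type() const { return VMOp_Dummy; }  // a real op adds its own entry to VM_OPS_DO
//     bool doit_prologue()   { return true; }        // runs in the requesting JavaThread; false cancels
//     void doit()            { /* runs in the VMThread, at a safepoint by default */ }
//     void doit_epilogue()   { }                     // runs in the requesting JavaThread afterwards
//   };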

class VM_ThreadStop: public VM_Operation {
 private:
  oop     _thread;        // The Thread that the Throwable is thrown against
  oop     _throwable;     // The Throwable thrown at the target Thread
 public:
  // All oops are passed as JNI handles, since there is no guarantee that a GC will not
  // happen before the VM operation is executed.
  VM_ThreadStop(oop thread, oop throwable) {
    _thread    = thread;
    _throwable = throwable;
  }
  VMOp_Type type() const                         { return VMOp_ThreadStop; }
  oop target_thread() const                      { return _thread; }
  oop throwable() const                          { return _throwable; }
  void doit();
  // We deoptimize if the top-most frame is compiled - this might require a C2I adapter to be generated
  bool allow_nested_vm_operations() const        { return true; }
  Mode evaluation_mode() const                   { return _async_safepoint; }
  bool is_cheap_allocated() const                { return true; }

  // GC support
  void oops_do(OopClosure* f) {
    f->do_oop(&_thread); f->do_oop(&_throwable);
  }
};

// dummy vm op, evaluated just to force a safepoint
class VM_ForceSafepoint: public VM_Operation {
 public:
  VM_ForceSafepoint() {}
  void doit()         {}
  VMOp_Type type() const { return VMOp_ForceSafepoint; }
};

// dummy vm op, evaluated just to force a safepoint
class VM_ForceAsyncSafepoint: public VM_Operation {
 public:
  VM_ForceAsyncSafepoint() {}
  void doit()              {}
  VMOp_Type type() const                         { return VMOp_ForceAsyncSafepoint; }
  Mode evaluation_mode() const                   { return _async_safepoint; }
  bool is_cheap_allocated() const                { return true; }
};

class VM_Deoptimize: public VM_Operation {
 public:
  VM_Deoptimize() {}
  VMOp_Type type() const                        { return VMOp_Deoptimize; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};


// Deopt helper that can deoptimize frames in threads other than the
// current thread.  Only used through Deoptimization::deoptimize_frame.
class VM_DeoptimizeFrame: public VM_Operation {
  friend class Deoptimization;

 private:
  JavaThread* _thread;
  intptr_t*   _id;
  VM_DeoptimizeFrame(JavaThread* thread, intptr_t* id);

 public:
  VMOp_Type type() const                         { return VMOp_DeoptimizeFrame; }
  void doit();
  bool allow_nested_vm_operations() const        { return true;  }
};
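
// Note (sketch): the constructor above is private, so callers reach this
// operation through the Deoptimization friend class rather than constructing
// it directly, e.g. something along the lines of
//
//   Deoptimization::deoptimize_frame(thread, frame_id);  // argument types assumed
//                                                        // from the constructor above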

class VM_HandleFullCodeCache: public VM_Operation {
 private:
  bool  _is_full;
 public:
  VM_HandleFullCodeCache(bool is_full)           { _is_full = is_full; }
  VMOp_Type type() const                         { return VMOp_HandleFullCodeCache; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};

#ifndef PRODUCT
class VM_DeoptimizeAll: public VM_Operation {
 private:
  KlassHandle _dependee;
 public:
  VM_DeoptimizeAll() {}
  VMOp_Type type() const                         { return VMOp_DeoptimizeAll; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};


class VM_ZombieAll: public VM_Operation {
 public:
  VM_ZombieAll() {}
  VMOp_Type type() const                         { return VMOp_ZombieAll; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};
#endif // PRODUCT

class VM_UnlinkSymbols: public VM_Operation {
 public:
  VM_UnlinkSymbols() {}
  VMOp_Type type() const                         { return VMOp_UnlinkSymbols; }
  void doit();
  bool allow_nested_vm_operations() const        { return true; }
};

class VM_Verify: public VM_Operation {
 private:
  KlassHandle _dependee;
 public:
  VM_Verify() {}
  VMOp_Type type() const { return VMOp_Verify; }
  void doit();
};


class VM_PrintThreads: public VM_Operation {
 private:
  outputStream* _out;
  bool _print_concurrent_locks;
 public:
  VM_PrintThreads()                                                { _out = tty; _print_concurrent_locks = PrintConcurrentLocks; }
  VM_PrintThreads(outputStream* out, bool print_concurrent_locks)  { _out = out; _print_concurrent_locks = print_concurrent_locks; }
  VMOp_Type type() const                                           { return VMOp_PrintThreads; }
  void doit();
  bool doit_prologue();
  void doit_epilogue();
};

class VM_PrintJNI: public VM_Operation {
 private:
  outputStream* _out;
 public:
  VM_PrintJNI()                         { _out = tty; }
  VM_PrintJNI(outputStream* out)        { _out = out; }
  VMOp_Type type() const                { return VMOp_PrintJNI; }
  void doit();
};

class DeadlockCycle;
class VM_FindDeadlocks: public VM_Operation {
 private:
  bool           _concurrent_locks;
  DeadlockCycle* _deadlocks;
  outputStream*  _out;

 public:
  VM_FindDeadlocks(bool concurrent_locks) :  _concurrent_locks(concurrent_locks), _deadlocks(NULL), _out(NULL) {};
  VM_FindDeadlocks(outputStream* st) : _concurrent_locks(true), _deadlocks(NULL), _out(st) {};
  ~VM_FindDeadlocks();

  DeadlockCycle* result()      { return _deadlocks; };
  VMOp_Type type() const       { return VMOp_FindDeadlocks; }
  void doit();
  bool doit_prologue();
};

class ThreadDumpResult;
class ThreadSnapshot;
class ThreadConcurrentLocks;

class VM_ThreadDump : public VM_Operation {
 private:
  ThreadDumpResult*              _result;
  int                            _num_threads;
  GrowableArray<instanceHandle>* _threads;
  int                            _max_depth;
  bool                           _with_locked_monitors;
  bool                           _with_locked_synchronizers;

  ThreadSnapshot* snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl);

 public:
  VM_ThreadDump(ThreadDumpResult* result,
                int max_depth,  // -1 indicates entire stack
                bool with_locked_monitors,
                bool with_locked_synchronizers);

  VM_ThreadDump(ThreadDumpResult* result,
                GrowableArray<instanceHandle>* threads,
                int num_threads,
                int max_depth,  // -1 indicates entire stack
                bool with_locked_monitors,
                bool with_locked_synchronizers);

  VMOp_Type type() const { return VMOp_ThreadDump; }
  void doit();
  bool doit_prologue();
  void doit_epilogue();
};


class VM_Exit: public VM_Operation {
 private:
  int  _exit_code;
  static volatile bool _vm_exited;
  static Thread * _shutdown_thread;
  static void wait_if_vm_exited();
 public:
  VM_Exit(int exit_code) {
    _exit_code = exit_code;
  }
  static int wait_for_threads_in_native_to_block();
  static int set_vm_exited();
  static bool vm_exited()                      { return _vm_exited; }
  static void block_if_vm_exited() {
    if (_vm_exited) {
      wait_if_vm_exited();
    }
  }
  VMOp_Type type() const { return VMOp_Exit; }
  void doit();
};

#endif // SHARE_VM_RUNTIME_VM_OPERATIONS_HPP