vmThread.cpp revision 4507:f36e073d56a4
/*
 * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"

#ifndef USDT2
HS_DTRACE_PROBE_DECL3(hotspot, vmops__request, char *, uintptr_t, int);
HS_DTRACE_PROBE_DECL3(hotspot, vmops__begin, char *, uintptr_t, int);
HS_DTRACE_PROBE_DECL3(hotspot, vmops__end, char *, uintptr_t, int);
#endif /* !USDT2 */

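// The VMThread is the single thread that evaluates VM operations
// (VM_Operation subclasses, see vm_operations.hpp) on behalf of other
// threads.  Requests are queued on a VMOperationQueue and processed one at
// a time by VMThread::loop(), either inside a safepoint or not, depending
// on the operation's evaluation mode.
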
// Dummy VM operation to act as the first element in our circular doubly-linked list
class VM_Dummy: public VM_Operation {
  VMOp_Type type() const { return VMOp_Dummy; }
  void  doit() {};
};

VMOperationQueue::VMOperationQueue() {
  // The queue is a circular doubly-linked list that always contains
  // one dummy element (i.e., a queue with one element is empty).
  for(int i = 0; i < nof_priorities; i++) {
    _queue_length[i] = 0;
    _queue_counter = 0;
    _queue[i] = new VM_Dummy();
    _queue[i]->set_next(_queue[i]);
    _queue[i]->set_prev(_queue[i]);
  }
  _drain_list = NULL;
}


bool VMOperationQueue::queue_empty(int prio) {
  // It is empty if there is exactly one element
  bool empty = (_queue[prio] == _queue[prio]->next());
  assert( (_queue_length[prio] == 0 && empty) ||
          (_queue_length[prio] > 0  && !empty), "sanity check");
  return _queue_length[prio] == 0;
}

// Inserts element n to the right of element q
void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  n->set_prev(q);
  n->set_next(q->next());
  q->next()->set_prev(n);
  q->set_next(n);
}

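// Note that insert() links the new element to the right of its first
// argument: queue_add_back() therefore inserts just in front of the dummy
// header (i.e., after the current tail), while queue_add_front() inserts
// after the header's successor.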
void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->next(), op);
}

void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->prev(), op);
}


void VMOperationQueue::unlink(VM_Operation* q) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  q->prev()->set_next(q->next());
  q->next()->set_prev(q->prev());
}

VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
  if (queue_empty(prio)) return NULL;
  assert(_queue_length[prio] >= 0, "sanity check");
  _queue_length[prio]--;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  unlink(r);
  return r;
}

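// Detach all operations of the given priority in one step.  The dummy
// header is unlinked from both ends, so the returned chain is terminated
// by NULL and can be walked with next(); the queue itself is restored to
// the empty state.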
VM_Operation* VMOperationQueue::queue_drain(int prio) {
  if (queue_empty(prio)) return NULL;
  DEBUG_ONLY(int length = _queue_length[prio];);
  assert(length >= 0, "sanity check");
  _queue_length[prio] = 0;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  // remove links to base element from head and tail
  r->set_prev(NULL);
  _queue[prio]->prev()->set_next(NULL);
  // restore queue to empty state
  _queue[prio]->set_next(_queue[prio]);
  _queue[prio]->set_prev(_queue[prio]);
  assert(queue_empty(prio), "drain corrupted queue");
#ifdef ASSERT
  int len = 0;
  VM_Operation* cur;
  for(cur = r; cur != NULL; cur=cur->next()) len++;
  assert(len == length, "drain lost some ops");
#endif
  return r;
}

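// Apply the closure to the oops of every operation still sitting in the
// given queue; drain_list_oops_do() covers operations that have already
// been drained for evaluation at a safepoint (see set_drain_list() in
// VMThread::loop()).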
void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {
  VM_Operation* cur = _queue[queue];
  cur = cur->next();
  while (cur != _queue[queue]) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

void VMOperationQueue::drain_list_oops_do(OopClosure* f) {
  VM_Operation* cur = _drain_list;
  while (cur != NULL) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

//-----------------------------------------------------------------
// High-level interface
bool VMOperationQueue::add(VM_Operation *op) {

#ifndef USDT2
  HS_DTRACE_PROBE3(hotspot, vmops__request, op->name(), strlen(op->name()),
                   op->evaluation_mode());
#else /* USDT2 */
  HOTSPOT_VMOPS_REQUEST(
                   (char *) op->name(), strlen(op->name()),
                   op->evaluation_mode());
#endif /* USDT2 */

  // Encapsulates the VM queue policy. Currently, the only policy is
  // putting the operation on the right queue.
  if (op->evaluate_at_safepoint()) {
    queue_add_back(SafepointPriority, op);
    return true;
  }

  queue_add_back(MediumPriority, op);
  return true;
}

VM_Operation* VMOperationQueue::remove_next() {
  // Assumes the VMOperation queue is a two-level priority queue. If there are
  // more than two priorities, we need a different scheduling algorithm.
  assert(SafepointPriority == 0 && MediumPriority == 1 && nof_priorities == 2,
         "current algorithm does not work");

  // Simple counter-based scheduling to prevent starvation of the lower
  // priority queue. -- see 4390175
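  // Out of every eleven calls, ten prefer the safepoint queue and the
  // eleventh prefers the medium queue, so the medium-priority queue cannot
  // be starved by a steady stream of safepoint operations.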
  int high_prio, low_prio;
  if (_queue_counter++ < 10) {
      high_prio = SafepointPriority;
      low_prio  = MediumPriority;
  } else {
      _queue_counter = 0;
      high_prio = MediumPriority;
      low_prio  = SafepointPriority;
  }

  return queue_remove_front(queue_empty(high_prio) ? low_prio : high_prio);
}

void VMOperationQueue::oops_do(OopClosure* f) {
  for(int i = 0; i < nof_priorities; i++) {
    queue_oops_do(i, f);
  }
  drain_list_oops_do(f);
}


//------------------------------------------------------------------------------------------------------------------
// Implementation of VMThread stuff
bool              VMThread::_should_terminate   = false;
bool              VMThread::_terminated         = false;
Monitor*          VMThread::_terminate_lock     = NULL;
VMThread*         VMThread::_vm_thread          = NULL;
VM_Operation*     VMThread::_cur_vm_operation   = NULL;
VMOperationQueue* VMThread::_vm_queue           = NULL;
PerfCounter*      VMThread::_perf_accumulated_vm_operation_time = NULL;


void VMThread::create() {
  assert(vm_thread() == NULL, "we can only allocate one VMThread");
  _vm_thread = new VMThread();

  // Create VM operation queue
  _vm_queue = new VMOperationQueue();
  guarantee(_vm_queue != NULL, "just checking");

  _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true);

  if (UsePerfData) {
    // jvmstat performance counters
    Thread* THREAD = Thread::current();
    _perf_accumulated_vm_operation_time =
                 PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
                                                 PerfData::U_Ticks, CHECK);
  }
}


VMThread::VMThread() : NamedThread() {
  set_name("VM Thread");
}

void VMThread::destroy() {
  if (_vm_thread != NULL) {
    delete _vm_thread;
    _vm_thread = NULL;      // VM thread is gone
  }
}

void VMThread::run() {
  assert(this == vm_thread(), "check");

  this->initialize_thread_local_storage();
  this->record_stack_base_and_size();
  // The Notify_lock wait checks active_handles() to decide whether to
  // re-wait after a spurious wakeup; it should wait on the last value
  // set prior to the notify.
  this->set_active_handles(JNIHandleBlock::allocate_block());

  {
    MutexLocker ml(Notify_lock);
    Notify_lock->notify();
  }
  // Notify_lock is destroyed by Threads::create_vm()

  int prio = (VMThreadPriority == -1)
    ? os::java_to_os_priority[NearMaxPriority]
    : VMThreadPriority;
  // Note that I cannot call os::set_priority because it expects Java
  // priorities and I am *explicitly* using OS priorities so that it's
  // possible to set the VM thread priority higher than any Java thread.
  os::set_native_priority( this, prio );

  // Wait for VM_Operations until termination
  this->loop();

  // Note the intention to exit before safepointing.
  // 6295565  This has the effect of waiting for any large tty
  // outputs to finish.
  if (xtty != NULL) {
    ttyLocker ttyl;
    xtty->begin_elem("destroy_vm");
    xtty->stamp();
    xtty->end_elem();
    assert(should_terminate(), "termination flag must be set");
  }

  // 4526887 let VM thread exit at Safepoint
  SafepointSynchronize::begin();

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    os::check_heap();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify(!(PrintGCDetails || Verbose));
  }

  CompileBroker::set_should_block();

  // wait for threads (compiler threads or daemon threads) in the
  // _thread_in_native state to block.
  VM_Exit::wait_for_threads_in_native_to_block();

  // signal other threads that VM process is gone
  {
    // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows
    // VM thread to enter any lock at Safepoint as long as its _owner is NULL.
    // If that happens after _terminate_lock->wait() has unset _owner
    // but before it actually drops the lock and waits, the notification below
    // may get lost and we will have a hang. To avoid this, we need to use
    // Mutex::lock_without_safepoint_check().
    MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    _terminated = true;
    _terminate_lock->notify();
  }

  // Deletion must be done synchronously by the JNI DestroyJavaVM thread
  // so that the VMThread deletion completes before the main thread frees
  // up the CodeHeap.

}


// Notify the VMThread that the last non-daemon JavaThread has terminated,
// and wait until operation is performed.
void VMThread::wait_for_vm_thread_exit() {
  { MutexLocker mu(VMOperationQueue_lock);
    _should_terminate = true;
    VMOperationQueue_lock->notify();
  }

  // Note: VM thread leaves at Safepoint. We are not stopped by Safepoint
  // because this thread has been removed from the threads list. But anything
  // that could get blocked by Safepoint should not be used after this point,
  // otherwise we will hang, since there is no one who can end the safepoint.

  // Wait until VM thread is terminated
  // Note: it should be OK to use Terminator_lock here. But this is called
  // at a very delicate time (VM shutdown) and we are operating in a non-VM
  // thread at a Safepoint. It's safer not to share a lock with other threads.
  { MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    while(!VMThread::is_terminated()) {
        _terminate_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}

void VMThread::print_on(outputStream* st) const {
  st->print("\"%s\" ", name());
  Thread::print_on(st);
  st->cr();
}

void VMThread::evaluate_operation(VM_Operation* op) {
  ResourceMark rm;

  {
    PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
#ifndef USDT2
    HS_DTRACE_PROBE3(hotspot, vmops__begin, op->name(), strlen(op->name()),
                     op->evaluation_mode());
#else /* USDT2 */
    HOTSPOT_VMOPS_BEGIN(
                     (char *) op->name(), strlen(op->name()),
                     op->evaluation_mode());
#endif /* USDT2 */
    op->evaluate();
#ifndef USDT2
    HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()),
                     op->evaluation_mode());
#else /* USDT2 */
    HOTSPOT_VMOPS_END(
                     (char *) op->name(), strlen(op->name()),
                     op->evaluation_mode());
#endif /* USDT2 */
  }

  // Last access of info in _cur_vm_operation!
  bool c_heap_allocated = op->is_cheap_allocated();

  // Mark as completed
  if (!op->evaluate_concurrently()) {
    op->calling_thread()->increment_vm_operation_completed_count();
  }
  // It is unsafe to access _cur_vm_operation after the
  // 'increment_vm_operation_completed_count' call: if the operation is stack
  // allocated, the calling thread might already have deallocated it.
  if (c_heap_allocated) {
    delete _cur_vm_operation;
  }
}


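// Main loop of the VM thread: wait for the next VM operation (or for a
// GuaranteedSafepointInterval timeout), evaluate it (batching any queued
// safepoint operations under a single safepoint), notify waiting threads,
// and periodically force a cleanup safepoint even when the queue is idle.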
void VMThread::loop() {
  assert(_cur_vm_operation == NULL, "no current one should be executing");

  while(true) {
    VM_Operation* safepoint_ops = NULL;
    //
    // Wait for VM operation
    //
    // use no_safepoint_check to get lock without attempting to "sneak"
    { MutexLockerEx mu_queue(VMOperationQueue_lock,
                             Mutex::_no_safepoint_check_flag);

      // Look for new operation
      assert(_cur_vm_operation == NULL, "no current one should be executing");
      _cur_vm_operation = _vm_queue->remove_next();

      // Stall time tracking code
      if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
          !_cur_vm_operation->evaluate_concurrently()) {
        long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
        if (stall > 0)
          tty->print_cr("%s stall: %ld",  _cur_vm_operation->name(), stall);
      }

      while (!should_terminate() && _cur_vm_operation == NULL) {
        // wait with a timeout to guarantee safepoints at regular intervals
        bool timedout =
          VMOperationQueue_lock->wait(Mutex::_no_safepoint_check_flag,
                                      GuaranteedSafepointInterval);

        // Support for self destruction
        if ((SelfDestructTimer != 0) && !is_error_reported() &&
            (os::elapsedTime() > SelfDestructTimer * 60)) {
          tty->print_cr("VM self-destructed");
          exit(-1);
        }

        if (timedout && (SafepointALot ||
                         SafepointSynchronize::is_cleanup_needed())) {
          MutexUnlockerEx mul(VMOperationQueue_lock,
                              Mutex::_no_safepoint_check_flag);
          // Force a safepoint since we have not had one for at least
          // 'GuaranteedSafepointInterval' milliseconds.  This will run all
          // the clean-up processing that needs to be done regularly at a
          // safepoint
          SafepointSynchronize::begin();
          #ifdef ASSERT
            if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
          #endif
          SafepointSynchronize::end();
        }
        _cur_vm_operation = _vm_queue->remove_next();

        // If we are at a safepoint we will evaluate all the operations that
        // follow that also require a safepoint
        if (_cur_vm_operation != NULL &&
            _cur_vm_operation->evaluate_at_safepoint()) {
          safepoint_ops = _vm_queue->drain_at_safepoint_priority();
        }
      }

      if (should_terminate()) break;
    } // Release mu_queue_lock

    //
    // Execute VM operation
    //
    { HandleMark hm(VMThread::vm_thread());

      EventMark em("Executing VM operation: %s", vm_operation()->name());
      assert(_cur_vm_operation != NULL, "we should have found an operation to execute");

      // Give the VM thread an extra quantum.  Jobs tend to be bursty and this
      // helps the VM thread to finish up the job.
      // FIXME: When this is enabled and there are many threads, this can degrade
      // performance significantly.
      if( VMThreadHintNoPreempt )
        os::hint_no_preempt();

      // If we are at a safepoint we will evaluate all the operations that
      // follow that also require a safepoint
      if (_cur_vm_operation->evaluate_at_safepoint()) {

        _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned

        SafepointSynchronize::begin();
        evaluate_operation(_cur_vm_operation);
        // now process all queued safepoint ops, iteratively draining
        // the queue until there are none left
        do {
          _cur_vm_operation = safepoint_ops;
          if (_cur_vm_operation != NULL) {
            do {
              // evaluate_operation deletes the op object so we have
              // to grab the next op now
              VM_Operation* next = _cur_vm_operation->next();
              _vm_queue->set_drain_list(next);
              evaluate_operation(_cur_vm_operation);
              _cur_vm_operation = next;
              if (PrintSafepointStatistics) {
                SafepointSynchronize::inc_vmop_coalesced_count();
              }
            } while (_cur_vm_operation != NULL);
          }
          // There is a chance that a thread enqueued a safepoint op
          // since we released the op-queue lock and initiated the safepoint.
          // So we drain the queue again if there is anything there, as an
          // optimization to try and reduce the number of safepoints.
          // Because the safepoint synchronizes us with JavaThreads, we will
          // see any enqueue made by a JavaThread; the peek will not
          // necessarily detect a concurrent enqueue by a GC thread, but
          // that simply means the op will wait for the next major cycle of the
          // VMThread - just as it would if the GC thread lost the race for
          // the lock.
          if (_vm_queue->peek_at_safepoint_priority()) {
            // must hold lock while draining queue
            MutexLockerEx mu_queue(VMOperationQueue_lock,
                                     Mutex::_no_safepoint_check_flag);
            safepoint_ops = _vm_queue->drain_at_safepoint_priority();
          } else {
            safepoint_ops = NULL;
          }
        } while(safepoint_ops != NULL);

        _vm_queue->set_drain_list(NULL);

        // Complete safepoint synchronization
        SafepointSynchronize::end();

      } else {  // not a safepoint operation
        if (TraceLongCompiles) {
          elapsedTimer t;
          t.start();
          evaluate_operation(_cur_vm_operation);
          t.stop();
          double secs = t.seconds();
          if (secs * 1e3 > LongCompileThreshold) {
            // XXX - _cur_vm_operation should not be accessed after
            // the completed count has been incremented; the waiting
            // thread may have already freed this memory.
            tty->print_cr("vm %s: %3.7f secs]", _cur_vm_operation->name(), secs);
          }
        } else {
          evaluate_operation(_cur_vm_operation);
        }

        _cur_vm_operation = NULL;
      }
    }

    //
    //  Notify (potential) waiting Java thread(s) - lock without safepoint
    //  check so that sneaking is not possible
    { MutexLockerEx mu(VMOperationRequest_lock,
                       Mutex::_no_safepoint_check_flag);
      VMOperationRequest_lock->notify_all();
    }

    //
    // We want to make sure that we get to a safepoint regularly.
    //
    if (SafepointALot || SafepointSynchronize::is_cleanup_needed()) {
      long interval          = SafepointSynchronize::last_non_safepoint_interval();
      bool max_time_exceeded = GuaranteedSafepointInterval != 0 && (interval > GuaranteedSafepointInterval);
      if (SafepointALot || max_time_exceeded) {
        HandleMark hm(VMThread::vm_thread());
        SafepointSynchronize::begin();
        SafepointSynchronize::end();
      }
    }
  }
}

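// Request that the VM thread evaluate 'op'.  A JavaThread or WatcherThread
// enqueues the operation and, unless the operation is concurrent, blocks
// until its completion ticket has been reached; the VM thread itself
// evaluates the operation directly as a (possibly nested) VM operation.
//
// Typical (hypothetical) usage from a JavaThread:
//   VM_SomeOperation op(args);   // some VM_Operation subclass
//   VMThread::execute(&op);      // returns once the VM thread has run it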
void VMThread::execute(VM_Operation* op) {
  Thread* t = Thread::current();

  if (!t->is_VM_thread()) {
    SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
    // JavaThread or WatcherThread
    bool concurrent = op->evaluate_concurrently();
    // only blocking VM operations need to verify the caller's safepoint state:
    if (!concurrent) {
      t->check_for_valid_safepoint_state(true);
    }

    // New request from Java thread, evaluate prologue
    if (!op->doit_prologue()) {
      return;   // op was cancelled
    }

    // Set up the VM_Operation for execution
    op->set_calling_thread(t, Thread::get_priority(t));

    // It does not make sense to execute the epilogue if the VM operation
    // object is getting deallocated by the VM thread.
    bool execute_epilog = !op->is_cheap_allocated();
    assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");

    // Get ticket number for non-concurrent VM operations
    int ticket = 0;
    if (!concurrent) {
      ticket = t->vm_operation_ticket();
    }

    // Add VM operation to list of waiting threads. We are guaranteed not to block while holding the
    // VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
    // to be queued up during a safepoint synchronization.
    {
      VMOperationQueue_lock->lock_without_safepoint_check();
      bool ok = _vm_queue->add(op);
      op->set_timestamp(os::javaTimeMillis());
      VMOperationQueue_lock->notify();
      VMOperationQueue_lock->unlock();
      // VM_Operation got skipped
      if (!ok) {
        assert(concurrent, "can only skip concurrent tasks");
        if (op->is_cheap_allocated()) delete op;
        return;
      }
    }

    if (!concurrent) {
      // Wait for completion of request (non-concurrent)
      // Note: only a JavaThread triggers the safepoint check when locking
      MutexLocker mu(VMOperationRequest_lock);
      while(t->vm_operation_completed_count() < ticket) {
        VMOperationRequest_lock->wait(!t->is_Java_thread());
      }
    }

    if (execute_epilog) {
      op->doit_epilogue();
    }
  } else {
    // invoked by VM thread; usually nested VM operation
    assert(t->is_VM_thread(), "must be a VM thread");
    VM_Operation* prev_vm_operation = vm_operation();
    if (prev_vm_operation != NULL) {
      // Check that the VM operation allows nested VM operations. This is normally
      // not the case; e.g., the compiler does not allow nested scavenges or compiles.
      if (!prev_vm_operation->allow_nested_vm_operations()) {
        fatal(err_msg("Nested VM operation %s requested by operation %s",
                      op->name(), vm_operation()->name()));
      }
      op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
    }

    EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());

    // Release all internal handles after operation is evaluated
    HandleMark hm(t);
    _cur_vm_operation = op;

    if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
      SafepointSynchronize::begin();
      op->evaluate();
      SafepointSynchronize::end();
    } else {
      op->evaluate();
    }

    // Free memory if needed
    if (op->is_cheap_allocated()) delete op;

    _cur_vm_operation = prev_vm_operation;
  }
}


void VMThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
  Thread::oops_do(f, cld_f, cf);
  _vm_queue->oops_do(f);
}

//------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT

void VMOperationQueue::verify_queue(int prio) {
  // Check that list is correctly linked
  int length = _queue_length[prio];
  VM_Operation *cur = _queue[prio];
  int i;

  // Check forward links
  for(i = 0; i < length; i++) {
    cur = cur->next();
    assert(cur != _queue[prio], "list too short (forward)");
  }
  assert(cur->next() == _queue[prio], "list too long (forward)");

  // Check backwards links
  cur = _queue[prio];
  for(i = 0; i < length; i++) {
    cur = cur->prev();
    assert(cur != _queue[prio], "list too short (backwards)");
  }
  assert(cur->prev() == _queue[prio], "list too long (backwards)");
}

#endif

void VMThread::verify() {
  oops_do(&VerifyOopClosure::verify_oop, NULL, NULL);
}
698