instanceKlass.cpp revision 2062:3582bf76420e
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/oopMapCache.hpp"
#include "interpreter/rewriter.hpp"
#include "jvmtifiles/jvmti.h"
#include "memory/genOopClosures.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/permGen.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayKlassKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif

#ifdef DTRACE_ENABLED

HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
  char*, intptr_t, oop, intptr_t);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
  char*, intptr_t, oop, intptr_t, int);
HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
  char*, intptr_t, oop, intptr_t, int);

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
  {                                                              \
    char* data = NULL;                                           \
    int len = 0;                                                 \
    Symbol* name = (clss)->name();                               \
    if (name != NULL) {                                          \
      data = (char*)name->bytes();                               \
      len = name->utf8_length();                                 \
    }                                                            \
    HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
      data, len, (clss)->class_loader(), thread_type);           \
  }

#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
  {                                                                \
    char* data = NULL;                                             \
    int len = 0;                                                   \
    Symbol* name = (clss)->name();                                 \
    if (name != NULL) {                                            \
      data = (char*)name->bytes();                                 \
      len = name->utf8_length();                                   \
    }                                                              \
    HS_DTRACE_PROBE5(hotspot, class__initialization__##type,       \
      data, len, (clss)->class_loader(), thread_type, wait);       \
  }

#else //  ndef DTRACE_ENABLED

#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)

#endif //  ndef DTRACE_ENABLED
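// Illustrative note on the probes above (an expository sketch, not part of
// the build): the declarations surface as USDT probes under the "hotspot"
// provider, and by DTrace convention the double underscores in the C-level
// probe name appear as single dashes on the script side. Assuming a
// DTrace-capable platform and a VM built with DTRACE_ENABLED, a D script
// attached with -p could watch the "required" probe roughly like this:
//
//   hotspot$target:::class-initialization-required
//   {
//     /* arg0 = class name bytes (not NUL-terminated), arg1 = length */
//     trace(arg1);
//   }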
bool instanceKlass::should_be_initialized() const {
  return !is_initialized();
}

klassVtable* instanceKlass::vtable() const {
  return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
}

klassItable* instanceKlass::itable() const {
  return new klassItable(as_klassOop());
}

void instanceKlass::eager_initialize(Thread *thread) {
  if (!EagerInitialization) return;

  if (this->is_not_initialized()) {
    // abort if the class has a class initializer
    if (this->class_initializer() != NULL) return;

    // abort if it is java.lang.Object (initialization is handled in genesis)
    klassOop super = this->super();
    if (super == NULL) return;

    // abort if the super class should be initialized
    if (!instanceKlass::cast(super)->is_initialized()) return;

    // call body to expose the this pointer
    instanceKlassHandle this_oop(thread, this->as_klassOop());
    eager_initialize_impl(this_oop);
  }
}


void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
  EXCEPTION_MARK;
  ObjectLocker ol(this_oop, THREAD);

  // abort if someone beat us to the initialization
  if (!this_oop->is_not_initialized()) return;  // note: not equivalent to is_initialized()

  ClassState old_state = this_oop->_init_state;
  link_class_impl(this_oop, true, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // Abort if linking the class throws an exception.

    // Use a test to avoid redundantly resetting the state if there's
    // no change.  Set_init_state() asserts that state changes make
    // progress, whereas here we might just be spinning in place.
    if (old_state != this_oop->_init_state)
      this_oop->set_init_state(old_state);
  } else {
    // linking successful, mark class as initialized
    this_oop->set_init_state(fully_initialized);
    // trace
    if (TraceClassInitialization) {
      ResourceMark rm(THREAD);
      tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
    }
  }
}


// See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
// process. The step comments refer to the procedure described in that section.
// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::initialize(TRAPS) {
  if (this->should_be_initialized()) {
    HandleMark hm(THREAD);
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    initialize_impl(this_oop, CHECK);
    // Note: at this point the class may be initialized
    //       OR it may be in the state of being initialized
    //       in case of recursive initialization!
  } else {
    assert(is_initialized(), "sanity check");
  }
}
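// Expository sketch of the _init_state machine driven by the code above and
// below (only states this file actually manipulates are shown):
//
//   loaded --link_class()--> linked --initialize()--> being_initialized
//       being_initialized --> fully_initialized     (clinit completed)
//       being_initialized --> initialization_error  (clinit threw)
//
// Transitions are monotonic, with two exceptions visible in this file:
// eager_initialize_impl() may roll back to the saved state when linking
// fails, and unlink_class() below drops a linked class back to "loaded"
// for the shared-archive dump. should_be_initialized(), is_linked(),
// is_being_initialized() and friends are simple predicates over this state.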
bool instanceKlass::verify_code(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // 1) Verify the bytecodes
  Verifier::Mode mode =
    throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
  return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
}


// Used exclusively by the shared spaces dump mechanism to prevent
// classes mapped into the shared regions in new VMs from appearing linked.

void instanceKlass::unlink_class() {
  assert(is_linked(), "must be linked");
  _init_state = loaded;
}

void instanceKlass::link_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, true, CHECK);
  }
}
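// A short sketch of the TRAPS/CHECK idiom used throughout this file
// (illustrative; the real macros live in utilities/exceptions.hpp). A
// callee declared with TRAPS takes the current thread as an implicit last
// argument; writing CHECK (or CHECK_false, CHECK_NULL, ...) at a call site
// supplies that thread and, after the call returns, tests for a pending
// exception and bails out with the given default value:
//
//   void caller(TRAPS) {
//     callee(arg, CHECK);   // roughly: callee(arg, THREAD);
//                           //          if (HAS_PENDING_EXCEPTION) return;
//   }
//
// This is why eager_initialize_impl() above passes THREAD rather than
// CHECK: it wants to inspect and clear the pending exception itself.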
// Called to verify that a class can link during initialization, without
// throwing a VerifyError.
bool instanceKlass::link_class_or_fail(TRAPS) {
  assert(is_loaded(), "must be loaded");
  if (!is_linked()) {
    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    link_class_impl(this_oop, false, CHECK_false);
  }
  return is_linked();
}

bool instanceKlass::link_class_impl(
    instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
  // check for error state
  if (this_oop->is_in_error_state()) {
    ResourceMark rm(THREAD);
    THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
               this_oop->external_name(), false);
  }
  // return if already verified
  if (this_oop->is_linked()) {
    return true;
  }

  // Timing
  // timer handles recursion
  assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
  JavaThread* jt = (JavaThread*)THREAD;

  // link super class before linking this class
  instanceKlassHandle super(THREAD, this_oop->super());
  if (super.not_null()) {
    if (super->is_interface()) {  // check if super class is an interface
      ResourceMark rm(THREAD);
      Exceptions::fthrow(
        THREAD_AND_LOCATION,
        vmSymbols::java_lang_IncompatibleClassChangeError(),
        "class %s has interface %s as super class",
        this_oop->external_name(),
        super->external_name()
      );
      return false;
    }

    link_class_impl(super, throw_verifyerror, CHECK_false);
  }

  // link all interfaces implemented by this class before linking this class
  objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
  int num_interfaces = interfaces->length();
  for (int index = 0; index < num_interfaces; index++) {
    HandleMark hm(THREAD);
    instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
    link_class_impl(ih, throw_verifyerror, CHECK_false);
  }

  // in case the class is linked in the process of linking its superclasses
  if (this_oop->is_linked()) {
    return true;
  }

  // trace only the link time for this klass that includes
  // the verification time
  PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
                             ClassLoader::perf_class_link_selftime(),
                             ClassLoader::perf_classes_linked(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_LINK);

  // verification & rewriting
  {
    ObjectLocker ol(this_oop, THREAD);
    // rewritten will have been set if loader constraint error found
    // on an earlier link attempt
    // don't verify or rewrite if already rewritten
    if (!this_oop->is_linked()) {
      if (!this_oop->is_rewritten()) {
        {
          // Timer includes any side effects of class verification (resolution,
          // etc), but not recursive entry into verify_code().
          PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
                                   ClassLoader::perf_class_verify_selftime(),
                                   ClassLoader::perf_classes_verified(),
                                   jt->get_thread_stat()->perf_recursion_counts_addr(),
                                   jt->get_thread_stat()->perf_timers_addr(),
                                   PerfClassTraceTime::CLASS_VERIFY);
          bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
          if (!verify_ok) {
            return false;
          }
        }

        // Just in case a side-effect of verify linked this class already
        // (which can sometimes happen since the verifier loads classes
        // using custom class loaders, which are free to initialize things)
        if (this_oop->is_linked()) {
          return true;
        }

        // also sets rewritten
        this_oop->rewrite_class(CHECK_false);
      }

      // Initialize the vtable and interface table after
      // methods have been rewritten since rewrite may
      // fabricate new methodOops.
      // also does loader constraint checking
      if (!this_oop()->is_shared()) {
        ResourceMark rm(THREAD);
        this_oop->vtable()->initialize_vtable(true, CHECK_false);
        this_oop->itable()->initialize_itable(true, CHECK_false);
      }
#ifdef ASSERT
      else {
        ResourceMark rm(THREAD);
        this_oop->vtable()->verify(tty, true);
        // In case itable verification is ever added.
        // this_oop->itable()->verify(tty, true);
      }
#endif
      this_oop->set_init_state(linked);
      if (JvmtiExport::should_post_class_prepare()) {
        Thread *thread = THREAD;
        assert(thread->is_Java_thread(), "thread->is_Java_thread()");
        JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
      }
    }
  }
  return true;
}


// Rewrite the byte codes of all of the methods of a class.
// Three cases:
//    During the link of a newly loaded class.
//    During the preloading of classes to be written to the shared spaces.
//      - Rewrite the methods and update the method entry points.
//
//    During the link of a class in the shared spaces.
//      - The methods were already rewritten, update the method entry points.
//
// The rewriter must be called exactly once. Rewriting must happen after
// verification but before the first method of the class is executed.

void instanceKlass::rewrite_class(TRAPS) {
  assert(is_loaded(), "must be loaded");
  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
  if (this_oop->is_rewritten()) {
    assert(this_oop()->is_shared(), "rewriting an unshared class?");
    return;
  }
  Rewriter::rewrite(this_oop, CHECK);  // No exception can happen here
  this_oop->set_rewritten();
}
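// What "rewriting" means here, in one illustrative example (this is the
// general idea; the authoritative logic is in interpreter/rewriter.cpp):
// the rewriter retargets constant-pool-based bytecodes at the constant
// pool cache, so the interpreter can patch in resolution state without
// mutating the original constant pool. Conceptually:
//
//   // before rewriting: operand is a constant pool index
//   invokevirtual #12
//   // after rewriting: operand is a constant pool *cache* index
//   invokevirtual @3    // cache entry 3 remembers the resolved method
//
// This is why rewriting must precede the first execution of any method of
// the class: the interpreter's dispatch assumes the rewritten operand form.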
void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
  // Make sure klass is linked (verified) before initialization
  // A class could already be verified, since it has been reflected upon.
  this_oop->link_class(CHECK);

  DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);

  bool wait = false;

  // refer to the JVM book page 47 for description of steps
  // Step 1
  {
    ObjectLocker ol(this_oop, THREAD);

    Thread *self = THREAD; // it's passed the current thread

    // Step 2
    // If we were to use wait() instead of waitUninterruptibly() then
    // we might end up throwing IE from link/symbol resolution sites
    // that aren't expected to throw.  This would wreak havoc.  See 6320309.
    while (this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
      wait = true;
      ol.waitUninterruptibly(CHECK);
    }

    // Step 3
    if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
      DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 4
    if (this_oop->is_initialized()) {
      DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1, wait);
      return;
    }

    // Step 5
    if (this_oop->is_in_error_state()) {
      DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1, wait);
      ResourceMark rm(THREAD);
      const char* desc = "Could not initialize class ";
      const char* className = this_oop->external_name();
      size_t msglen = strlen(desc) + strlen(className) + 1;
      char* message = NEW_RESOURCE_ARRAY(char, msglen);
      if (NULL == message) {
        // Out of memory: can't create detailed error message
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
      } else {
        jio_snprintf(message, msglen, "%s%s", desc, className);
        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
      }
    }

    // Step 6
    this_oop->set_init_state(being_initialized);
    this_oop->set_init_thread(self);
  }

  // Step 7
  klassOop super_klass = this_oop->super();
  if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
    Klass::cast(super_klass)->initialize(THREAD);

    if (HAS_PENDING_EXCEPTION) {
      Handle e(THREAD, PENDING_EXCEPTION);
      CLEAR_PENDING_EXCEPTION;
      {
        EXCEPTION_MARK;
        this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
        CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, superclass initialization error is thrown below
      }
      DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1, wait);
      THROW_OOP(e());
    }
  }

  // Step 8
  {
    assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
    JavaThread* jt = (JavaThread*)THREAD;
    DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1, wait);
    // Timer includes any side effects of class initialization (resolution,
    // etc), but not recursive entry into call_class_initializer().
    PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
                             ClassLoader::perf_class_init_selftime(),
                             ClassLoader::perf_classes_inited(),
                             jt->get_thread_stat()->perf_recursion_counts_addr(),
                             jt->get_thread_stat()->perf_timers_addr(),
                             PerfClassTraceTime::CLASS_CLINIT);
    this_oop->call_class_initializer(THREAD);
  }

  // Step 9
  if (!HAS_PENDING_EXCEPTION) {
    this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
    {
      ResourceMark rm(THREAD);
      debug_only(this_oop->vtable()->verify(tty, true);)
    }
  } else {
    // Step 10 and 11
    Handle e(THREAD, PENDING_EXCEPTION);
    CLEAR_PENDING_EXCEPTION;
    {
      EXCEPTION_MARK;
      this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
      CLEAR_PENDING_EXCEPTION;   // ignore any exception thrown, class initialization error is thrown below
    }
    DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1, wait);
    if (e->is_a(SystemDictionary::Error_klass())) {
      THROW_OOP(e());
    } else {
      JavaCallArguments args(e);
      THROW_ARG(vmSymbols::java_lang_ExceptionInInitializerError(),
                vmSymbols::throwable_void_signature(),
                &args);
    }
  }
  DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1, wait);
}
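// The monitor choreography above, reduced to a sketch (expository only;
// thread A initializes, thread B races it):
//
//   A: lock(mirror); state = being_initialized; init_thread = A; unlock
//   B: lock(mirror); sees being_initialized and !reentrant
//        -> ol.waitUninterruptibly() in the Step 2 loop
//   A: runs <clinit>; then set_initialization_state_and_notify():
//        lock(mirror); state = fully_initialized (or initialization_error);
//        notify_all; unlock
//   B: wakes and re-tests: Step 4 returns quietly on success; Step 5
//        throws NoClassDefFoundError if A failed.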
// Note: implementation moved to static method to expose the this pointer.
void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
  instanceKlassHandle kh(THREAD, this->as_klassOop());
  set_initialization_state_and_notify_impl(kh, state, CHECK);
}

void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
  ObjectLocker ol(this_oop, THREAD);
  this_oop->set_init_state(state);
  ol.notify_all(CHECK);
}
void instanceKlass::add_implementor(klassOop k) {
  assert(Compile_lock->owned_by_self(), "");
  // Filter out my subinterfaces.
  // (Note: Interfaces are never on the subklass list.)
  if (instanceKlass::cast(k)->is_interface()) return;

  // Filter out subclasses whose supers already implement me.
  // (Note: CHA must walk subclasses of direct implementors
  // in order to locate indirect implementors.)
  klassOop sk = instanceKlass::cast(k)->super();
  if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
    // We only need to check one immediate superclass, since the
    // implements_interface query looks at transitive_interfaces.
    // Any supers of the super have the same (or fewer) transitive_interfaces.
    return;

  // Update number of implementors
  int i = _nof_implementors++;

  // Record this implementor, if there are not too many already
  if (i < implementors_limit) {
    assert(_implementors[i] == NULL, "should be exactly one implementor");
    oop_store_without_check((oop*)&_implementors[i], k);
  } else if (i == implementors_limit) {
    // clear out the list on first overflow
    for (int i2 = 0; i2 < implementors_limit; i2++)
      oop_store_without_check((oop*)&_implementors[i2], NULL);
  }

  // The implementor also implements the transitive_interfaces
  for (int index = 0; index < local_interfaces()->length(); index++) {
    instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
  }
}
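// Worked example of the recording policy above (expository; assumes a
// hypothetical implementors_limit of 2 purely for illustration):
//
//   add(C1): _nof_implementors = 1, _implementors = { C1, NULL }
//   add(C2): _nof_implementors = 2, _implementors = { C1, C2 }
//   add(C3): _nof_implementors = 3, list cleared on first overflow
//
// Past the limit only the count survives, which is all CHA-style
// dependency checking needs to know ("more implementors than we track");
// mark_dependent_nmethods() further below reacts to such changes.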
void instanceKlass::init_implementor() {
  for (int i = 0; i < implementors_limit; i++)
    oop_store_without_check((oop*)&_implementors[i], NULL);
  _nof_implementors = 0;
}


void instanceKlass::process_interfaces(Thread *thread) {
  // link this class into the implementors list of every interface it implements
  KlassHandle this_as_oop (thread, this->as_klassOop());
  for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
    assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
    instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
    assert(interf->is_interface(), "expected interface");
    interf->add_implementor(this_as_oop());
  }
}

bool instanceKlass::can_be_primary_super_slow() const {
  if (is_interface())
    return false;
  else
    return Klass::can_be_primary_super_slow();
}
objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
  // The secondaries are the implemented interfaces.
  instanceKlass* ik = instanceKlass::cast(as_klassOop());
  objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
  int num_secondaries = num_extra_slots + interfaces->length();
  if (num_secondaries == 0) {
    return Universe::the_empty_system_obj_array();
  } else if (num_extra_slots == 0) {
    return interfaces();
  } else {
    // a mix of both
    objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
    for (int i = 0; i < interfaces->length(); i++) {
      secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
    }
    return secondaries;
  }
}

bool instanceKlass::compute_is_subtype_of(klassOop k) {
  if (Klass::cast(k)->is_interface()) {
    return implements_interface(k);
  } else {
    return Klass::compute_is_subtype_of(k);
  }
}

bool instanceKlass::implements_interface(klassOop k) const {
  if (as_klassOop() == k) return true;
  assert(Klass::cast(k)->is_interface(), "should be an interface class");
  for (int i = 0; i < transitive_interfaces()->length(); i++) {
    if (transitive_interfaces()->obj_at(i) == k) {
      return true;
    }
  }
  return false;
}

objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
  if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
  if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
    report_java_out_of_memory("Requested array size exceeds VM limit");
    THROW_OOP_0(Universe::out_of_memory_error_array_size());
  }
  int size = objArrayOopDesc::object_size(length);
  klassOop ak = array_klass(n, CHECK_NULL);
  KlassHandle h_ak (THREAD, ak);
  objArrayOop o =
    (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
  return o;
}

instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
  if (TraceFinalizerRegistration) {
    tty->print("Registered ");
    i->print_value_on(tty);
    tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
  }
  instanceHandle h_i(THREAD, i);
  // Pass the handle as argument, JavaCalls::call expects oops as jobjects
  JavaValue result(T_VOID);
  JavaCallArguments args(h_i);
  methodHandle mh (THREAD, Universe::finalizer_register_method());
  JavaCalls::call(&result, mh, &args, CHECK_NULL);
  return h_i();
}

instanceOop instanceKlass::allocate_instance(TRAPS) {
  bool has_finalizer_flag = has_finalizer(); // Query before possible GC
  int size = size_helper();  // Query before forming handle.

  KlassHandle h_k(THREAD, as_klassOop());

  instanceOop i;

  i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
  if (has_finalizer_flag && !RegisterFinalizersAtInit) {
    i = register_finalizer(i, CHECK_NULL);
  }
  return i;
}
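// Timing of finalizer registration, sketched (expository): with the default
// +RegisterFinalizersAtInit the Object.<init> constructor registers the
// receiver with java.lang.ref.Finalizer, so allocate_instance() does nothing
// extra. With -RegisterFinalizersAtInit, registration happens at allocation
// time instead, through the register_finalizer() upcall above:
//
//   instanceOop obj = ik->allocate_instance(CHECK_NULL);
//   // if ik->has_finalizer(): obj has already been handed to the Java-level
//   // Finalizer.register(obj) via JavaCalls::call before we see it here.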
instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
  // Finalizer registration occurs in the Object.<init> constructor
  // and constructors normally aren't run when allocating perm
  // instances so simply disallow finalizable perm objects.  This can
  // be relaxed if a need for it is found.
  assert(!has_finalizer(), "perm objects not allowed to have finalizers");
  int size = size_helper();  // Query before forming handle.
  KlassHandle h_k(THREAD, as_klassOop());
  instanceOop i = (instanceOop)
    CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
  return i;
}

void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
  if (is_interface() || is_abstract()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
              : vmSymbols::java_lang_InstantiationException(), external_name());
  }
  if (as_klassOop() == SystemDictionary::Class_klass()) {
    ResourceMark rm(THREAD);
    THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
              : vmSymbols::java_lang_IllegalAccessException(), external_name());
  }
}

klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
  instanceKlassHandle this_oop(THREAD, as_klassOop());
  return array_klass_impl(this_oop, or_null, n, THREAD);
}

klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
  if (this_oop->array_klasses() == NULL) {
    if (or_null) return NULL;

    ResourceMark rm;
    JavaThread *jt = (JavaThread *)THREAD;
    {
      // Atomic creation of array_klasses
      MutexLocker mc(Compile_lock, THREAD);   // for vtables
      MutexLocker ma(MultiArray_lock, THREAD);

      // Check if update has already taken place
      if (this_oop->array_klasses() == NULL) {
        objArrayKlassKlass* oakk =
          (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();

        klassOop k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
        this_oop->set_array_klasses(k);
      }
    }
  }
  // array_klasses() will always be set at this point
  objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
  if (or_null) {
    return oak->array_klass_or_null(n);
  }
  return oak->array_klass(n, CHECK_NULL);
}

klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
  return array_klass_impl(or_null, 1, THREAD);
}

void instanceKlass::call_class_initializer(TRAPS) {
  instanceKlassHandle ik (THREAD, as_klassOop());
  call_class_initializer_impl(ik, THREAD);
}

static int call_class_initializer_impl_counter = 0;   // for debugging

methodOop instanceKlass::class_initializer() {
  return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
}

void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
  methodHandle h_method(THREAD, this_oop->class_initializer());
  assert(!this_oop->is_initialized(), "we cannot initialize twice");
  if (TraceClassInitialization) {
    tty->print("%d Initializing ", call_class_initializer_impl_counter++);
    this_oop->name()->print_value();
    tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
  }
  if (h_method() != NULL) {
    JavaCallArguments args; // No arguments
    JavaValue result(T_VOID);
    JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
  }
}


void instanceKlass::mask_for(methodHandle method, int bci,
                             InterpreterOopMap* entry_for) {
  // Dirty read, then double-check under a lock.
  if (_oop_map_cache == NULL) {
    // Otherwise, allocate a new one.
    MutexLocker x(OopMapCacheAlloc_lock);
    // First time use. Allocate a cache in C heap
    if (_oop_map_cache == NULL) {
      _oop_map_cache = new OopMapCache();
    }
  }
  // _oop_map_cache is constant after init; lookup below does its own locking.
  _oop_map_cache->lookup(method, bci, entry_for);
}
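// Reading note for the field loops below (spelled out as an assumption for
// clarity; the authoritative layout lives in instanceKlass.hpp): fields()
// is a flat array of shorts in which each field occupies next_offset
// consecutive slots. The slots this file touches are
//
//   fields()->ushort_at(i + name_index_offset)       // CP index of the name
//   fields()->ushort_at(i + signature_index_offset)  // CP index of the signature
//   offset_from_fields(i)                            // field's byte offset in the object
//
// which is why every scan advances with "i += next_offset" rather than "i++".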
bool instanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    int sig_index  = fields()->ushort_at(i + signature_index_offset);
    Symbol* f_name = constants()->symbol_at(name_index);
    Symbol* f_sig  = constants()->symbol_at(sig_index);
    if (f_name == name && f_sig == sig) {
      fd->initialize(as_klassOop(), i);
      return true;
    }
  }
  return false;
}


void instanceKlass::shared_symbols_iterate(SymbolClosure* closure) {
  Klass::shared_symbols_iterate(closure);
  closure->do_symbol(&_generic_signature);
  closure->do_symbol(&_source_file_name);
  closure->do_symbol(&_source_debug_extension);

  const int n = fields()->length();
  for (int i = 0; i < n; i += next_offset ) {
    int name_index = fields()->ushort_at(i + name_index_offset);
    closure->do_symbol(constants()->symbol_at_addr(name_index));
    int sig_index = fields()->ushort_at(i + signature_index_offset);
    closure->do_symbol(constants()->symbol_at_addr(sig_index));
  }
}


klassOop instanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  const int n = local_interfaces()->length();
  for (int i = 0; i < n; i++) {
    klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
    assert(Klass::cast(intf1)->is_interface(), "just checking type");
    // search for field in current interface
    if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
      assert(fd->is_static(), "interface field must be static");
      return intf1;
    }
    // search for field in direct superinterfaces
    klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
    if (intf2 != NULL) return intf2;
  }
  // otherwise field lookup fails
  return NULL;
}


klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  { klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}
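// Worked example of the 5.4.3.2 lookup order (hypothetical classes, for
// exposition only). Given
//
//   interface I                    { int F = 1; }   // static field F
//   class B                        { int F; }
//   class C extends B implements I { }
//
// find_field(F, "I", fd) on C proceeds: (1) C declares no F locally;
// (2) the superinterface scan finds I.F and returns I, so the interface
// constant wins over the inherited instance field; only if no interface
// declared F would step (3) recurse into B.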
klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
  // search order according to newest JVM spec (5.4.3.2, p.167).
  // 1) search for field in current klass
  if (find_local_field(name, sig, fd)) {
    if (fd->is_static() == is_static) return as_klassOop();
  }
  // 2) search for field recursively in direct superinterfaces
  if (is_static) {
    klassOop intf = find_interface_field(name, sig, fd);
    if (intf != NULL) return intf;
  }
  // 3) apply field lookup recursively if superclass exists
  { klassOop supr = super();
    if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
  }
  // 4) otherwise field lookup fails
  return NULL;
}


bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    if (offset_from_fields( i ) == offset) {
      fd->initialize(as_klassOop(), i);
      if (fd->is_static() == is_static) return true;
    }
  }
  return false;
}


bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
      return true;
    }
    klass = Klass::cast(klass)->super();
  }
  return false;
}


void instanceKlass::methods_do(void f(methodOop method)) {
  int len = methods()->length();
  for (int index = 0; index < len; index++) {
    methodOop m = methodOop(methods()->obj_at(index));
    assert(m->is_method(), "must be method");
    f(m);
  }
}

void instanceKlass::do_local_static_fields(FieldClosure* cl) {
  fieldDescriptor fd;
  int length = fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (fd.is_static()) cl->do_field(&fd);
  }
}


void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
  instanceKlassHandle h_this(THREAD, as_klassOop());
  do_local_static_fields_impl(h_this, f, CHECK);
}


void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
  fieldDescriptor fd;
  int length = this_oop->fields()->length();
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(this_oop(), i);
    if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
  }
}


static int compare_fields_by_offset(int* a, int* b) {
  return a[0] - b[0];
}

void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
  instanceKlass* super = superklass();
  if (super != NULL) {
    super->do_nonstatic_fields(cl);
  }
  fieldDescriptor fd;
  int length = fields()->length();
  // In DebugInfo nonstatic fields are sorted by offset.
  int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
  int j = 0;
  for (int i = 0; i < length; i += next_offset) {
    fd.initialize(as_klassOop(), i);
    if (!fd.is_static()) {
      fields_sorted[j + 0] = fd.offset();
      fields_sorted[j + 1] = i;
      j += 2;
    }
  }
  if (j > 0) {
    length = j;
    // _sort_Fn is defined in growableArray.hpp.
    qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
    for (int i = 0; i < length; i += 2) {
      fd.initialize(as_klassOop(), fields_sorted[i + 1]);
      assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
      cl->do_field(&fd);
    }
  }
  FREE_C_HEAP_ARRAY(int, fields_sorted);
}
void instanceKlass::array_klasses_do(void f(klassOop k)) {
  if (array_klasses() != NULL)
    arrayKlass::cast(array_klasses())->array_klasses_do(f);
}


void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
  f(as_klassOop());
  array_klasses_do(f);
}

#ifdef ASSERT
static int linear_search(objArrayOop methods, Symbol* name, Symbol* signature) {
  int len = methods->length();
  for (int index = 0; index < len; index++) {
    methodOop m = (methodOop)(methods->obj_at(index));
    assert(m->is_method(), "must be method");
    if (m->signature() == signature && m->name() == name) {
      return index;
    }
  }
  return -1;
}
#endif

methodOop instanceKlass::find_method(Symbol* name, Symbol* signature) const {
  return instanceKlass::find_method(methods(), name, signature);
}

methodOop instanceKlass::find_method(objArrayOop methods, Symbol* name, Symbol* signature) {
  int len = methods->length();
  // methods are sorted, so do binary search
  int l = 0;
  int h = len - 1;
  while (l <= h) {
    int mid = (l + h) >> 1;
    methodOop m = (methodOop)methods->obj_at(mid);
    assert(m->is_method(), "must be method");
    int res = m->name()->fast_compare(name);
    if (res == 0) {
      // found matching name; do linear search to find matching signature
      // first, quick check for common case
      if (m->signature() == signature) return m;
      // search downwards through overloaded methods
      int i;
      for (i = mid - 1; i >= l; i--) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // search upwards
      for (i = mid + 1; i <= h; i++) {
        methodOop m = (methodOop)methods->obj_at(i);
        assert(m->is_method(), "must be method");
        if (m->name() != name) break;
        if (m->signature() == signature) return m;
      }
      // not found
#ifdef ASSERT
      int index = linear_search(methods, name, signature);
      assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
      return NULL;
    } else if (res < 0) {
      l = mid + 1;
    } else {
      h = mid - 1;
    }
  }
#ifdef ASSERT
  int index = linear_search(methods, name, signature);
  assert(index == -1, err_msg("binary search should have found entry %d", index));
#endif
  return NULL;
}

methodOop instanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
  klassOop klass = as_klassOop();
  while (klass != NULL) {
    methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
    if (method != NULL) return method;
    klass = instanceKlass::cast(klass)->super();
  }
  return NULL;
}
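// Usage sketch (expository; mirrors what class_initializer() earlier in
// this file already does): lookups always key on interned Symbol* pairs,
// so callers fetch name and signature from vmSymbols or a constant pool
// first, e.g.
//
//   methodOop clinit = ik->find_method(vmSymbols::class_initializer_name(),
//                                      vmSymbols::void_method_signature());
//
// Because the methods array is sorted by name (fast_compare order), the
// binary search plus the short linear scan over same-named overloads costs
// O(log n + #overloads).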
// lookup a method in all the interfaces that this class implements
methodOop instanceKlass::lookup_method_in_all_interfaces(Symbol* name,
                                                         Symbol* signature) const {
  objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
  int num_ifs = all_ifs->length();
  instanceKlass *ik = NULL;
  for (int i = 0; i < num_ifs; i++) {
    ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
    methodOop m = ik->lookup_method(name, signature);
    if (m != NULL) {
      return m;
    }
  }
  return NULL;
}

/* jni_id_for_impl for jfieldIds only */
JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
  MutexLocker ml(JfieldIdCreation_lock);
  // Retry lookup after we got the lock
  JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
  if (probe == NULL) {
    // Slow case, allocate new static field identifier
    probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
    this_oop->set_jni_ids(probe);
  }
  return probe;
}


/* jni_id_for for jfieldIds only */
JNIid* instanceKlass::jni_id_for(int offset) {
  JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
  if (probe == NULL) {
    probe = jni_id_for_impl(this->as_klassOop(), offset);
  }
  return probe;
}


// Lookup or create a jmethodID.
// This code is called by the VMThread and JavaThreads so the
// locking has to be done very carefully to avoid deadlocks
// and/or other cache consistency problems.
//
jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
  size_t idnum = (size_t)method_h->method_idnum();
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  size_t length = 0;
  jmethodID id = NULL;

  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_jmethod_ids() to advertise the new cache.
  // A partially constructed cache should never be seen by a racing
  // thread. We also use release_store_ptr() to save a new jmethodID
  // in the cache so a partially constructed jmethodID should never be
  // seen either. Cache reads of existing jmethodIDs proceed without a
  // lock, but cache writes of a new jmethodID require uniqueness and
  // creation of the cache itself requires no leaks so a lock is
  // generally acquired in those two cases.
  //
  // If the RedefineClasses() API has been used, then this cache can
  // grow and we'll have transitions from non-NULL to bigger non-NULL.
  // Cache creation requires no leaks and we require safety between all
  // cache accesses and freeing of the old cache so a lock is generally
  // acquired when the RedefineClasses() API has been used.

  if (jmeths != NULL) {
    // the cache already exists
    if (!ik_h->idnum_can_increment()) {
      // the cache can't grow so we can just get the current values
      get_jmethod_id_length_value(jmeths, idnum, &length, &id);
    } else {
      // cache can grow so we have to be more careful
      if (Threads::number_of_threads() == 0 ||
          SafepointSynchronize::is_at_safepoint()) {
        // we're single threaded or at a safepoint - no locking needed
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      } else {
        MutexLocker ml(JmethodIdCreation_lock);
        get_jmethod_id_length_value(jmeths, idnum, &length, &id);
      }
    }
  }
  // implied else:
  // we need to allocate a cache so default length and id values are good

  if (jmeths == NULL ||   // no cache yet
      length <= idnum ||  // cache is too short
      id == NULL) {       // cache doesn't contain entry

    // This function can be called by the VMThread so we have to do all
    // things that might block on a safepoint before grabbing the lock.
    // Otherwise, we can deadlock with the VMThread or have a cache
    // consistency issue. These vars keep track of what we might have
    // to free after the lock is dropped.
    jmethodID to_dealloc_id = NULL;
    jmethodID* to_dealloc_jmeths = NULL;

    // may not allocate new_jmeths or use it if we allocate it
    jmethodID* new_jmeths = NULL;
    if (length <= idnum) {
      // allocate a new cache that might be used
      size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
      new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
      memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
      // cache size is stored in element[0], other elements offset by one
      new_jmeths[0] = (jmethodID)size;
    }

    // allocate a new jmethodID that might be used
    jmethodID new_id = NULL;
    if (method_h->is_old() && !method_h->is_obsolete()) {
      // The method passed in is old (but not obsolete), we need to use the current version
      methodOop current_method = ik_h->method_with_idnum((int)idnum);
      assert(current_method != NULL, "old but not obsolete, so should exist");
      methodHandle current_method_h(current_method == NULL ? method_h() : current_method);
      new_id = JNIHandles::make_jmethod_id(current_method_h);
    } else {
      // It is the current version of the method or an obsolete method,
      // use the version passed in
      new_id = JNIHandles::make_jmethod_id(method_h);
    }

    if (Threads::number_of_threads() == 0 ||
        SafepointSynchronize::is_at_safepoint()) {
      // we're single threaded or at a safepoint - no locking needed
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    } else {
      MutexLocker ml(JmethodIdCreation_lock);
      id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
                                          &to_dealloc_id, &to_dealloc_jmeths);
    }

    // The lock has been dropped so we can free resources.
    // Free up either the old cache or the new cache if we allocated one.
    if (to_dealloc_jmeths != NULL) {
      FreeHeap(to_dealloc_jmeths);
    }
    // free up the new ID since it wasn't needed
    if (to_dealloc_id != NULL) {
      JNIHandles::destroy_jmethod_id(to_dealloc_id);
    }
  }
  return id;
}
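// The safepoint-aware allocation protocol above, boiled down (an expository
// sketch of the code just read, not additional logic):
//
//   new_jmeths = NEW_C_HEAP_ARRAY(...);          // may block; do BEFORE lock
//   new_id     = JNIHandles::make_jmethod_id(...);
//   {
//     MutexLocker ml(JmethodIdCreation_lock);    // skipped at a safepoint
//     id = get_jmethod_id_fetch_or_update(...);  // no safepoint inside
//   }
//   FreeHeap(loser_cache);                       // cleanup AFTER unlock
//   JNIHandles::destroy_jmethod_id(loser_id);
//
// Losers of the race free their speculative allocations only after the
// lock is released, so nothing that can block on a safepoint ever runs
// while the lock is held.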
// Common code to fetch the jmethodID from the cache or update the
// cache with the new jmethodID. This function should never do anything
// that causes the caller to go to a safepoint or we can deadlock with
// the VMThread or have cache consistency issues.
//
jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
    instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
    jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
    jmethodID** to_dealloc_jmeths_p) {
  assert(new_id != NULL, "sanity check");
  assert(to_dealloc_id_p != NULL, "sanity check");
  assert(to_dealloc_jmeths_p != NULL, "sanity check");
  assert(Threads::number_of_threads() == 0 ||
         SafepointSynchronize::is_at_safepoint() ||
         JmethodIdCreation_lock->owned_by_self(), "sanity check");

  // reacquire the cache - we are locked, single threaded or at a safepoint
  jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
  jmethodID id = NULL;
  size_t length = 0;

  if (jmeths == NULL ||                         // no cache yet
      (length = (size_t)jmeths[0]) <= idnum) {  // cache is too short
    if (jmeths != NULL) {
      // copy any existing entries from the old cache
      for (size_t index = 0; index < length; index++) {
        new_jmeths[index+1] = jmeths[index+1];
      }
      *to_dealloc_jmeths_p = jmeths;  // save old cache for later delete
    }
    ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
  } else {
    // fetch jmethodID (if any) from the existing cache
    id = jmeths[idnum+1];
    *to_dealloc_jmeths_p = new_jmeths;  // save new cache for later delete
  }
  if (id == NULL) {
    // No matching jmethodID in the existing cache or we have a new
    // cache or we just grew the cache. This cache write is done here
    // by the first thread to win the foot race because a jmethodID
    // needs to be unique once it is generally available.
    id = new_id;

    // The jmethodID cache can be read while unlocked so we have to
    // make sure the new jmethodID is complete before installing it
    // in the cache.
    OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
  } else {
    *to_dealloc_id_p = new_id;  // save new id for later delete
  }
  return id;
}
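// Cache layout, concretely (expository): a cache sized for 3 idnums is an
// array of 4 jmethodIDs,
//
//   jmeths[0] = (jmethodID)3;    // length, smuggled through element 0
//   jmeths[1] = id for idnum 0;  // entries are offset by one
//   jmeths[2] = id for idnum 1;
//   jmeths[3] = id for idnum 2;
//
// hence the recurring "jmeths[idnum+1]" and "(size_t)jmeths[0]" idioms in
// this function and its callers.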
// Common code to get the jmethodID cache length and the jmethodID
// value at index idnum if there is one.
//
void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
    size_t idnum, size_t *length_p, jmethodID* id_p) {
  assert(cache != NULL, "sanity check");
  assert(length_p != NULL, "sanity check");
  assert(id_p != NULL, "sanity check");

  // cache size is stored in element[0], other elements offset by one
  *length_p = (size_t)cache[0];
  if (*length_p <= idnum) {  // cache is too short
    *id_p = NULL;
  } else {
    *id_p = cache[idnum+1];  // fetch jmethodID (if any)
  }
}
// Lookup a jmethodID, NULL if not found.
// Do no blocking, no allocations, no handles.
jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
  size_t idnum = (size_t)method->method_idnum();
  jmethodID* jmeths = methods_jmethod_ids_acquire();
  size_t length;                                // length assigned as debugging crumb
  jmethodID id = NULL;
  if (jmeths != NULL &&                         // If there is a cache
      (length = (size_t)jmeths[0]) > idnum) {   // and if it is long enough,
    id = jmeths[idnum+1];                       // Look up the id (may be NULL)
  }
  return id;
}


// Cache an itable index
void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
  int* indices = methods_cached_itable_indices_acquire();
  int* to_dealloc_indices = NULL;

  // We use a double-check locking idiom here because this cache is
  // performance sensitive. In the normal system, this cache only
  // transitions from NULL to non-NULL which is safe because we use
  // release_set_methods_cached_itable_indices() to advertise the
  // new cache. A partially constructed cache should never be seen
  // by a racing thread. Cache reads and writes proceed without a
  // lock, but creation of the cache itself requires no leaks so a
  // lock is generally acquired in that case.
  //
  // If the RedefineClasses() API has been used, then this cache can
  // grow and we'll have transitions from non-NULL to bigger non-NULL.
  // Cache creation requires no leaks and we require safety between all
  // cache accesses and freeing of the old cache so a lock is generally
  // acquired when the RedefineClasses() API has been used.

  if (indices == NULL || idnum_can_increment()) {
    // we need a cache or the cache can grow
    MutexLocker ml(JNICachedItableIndex_lock);
    // reacquire the cache to see if another thread already did the work
    indices = methods_cached_itable_indices_acquire();
    size_t length = 0;
    // cache size is stored in element[0], other elements offset by one
    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
      new_indices[0] = (int)size;
      // copy any existing entries
      size_t i;
      for (i = 0; i < length; i++) {
        new_indices[i+1] = indices[i+1];
      }
      // Set all the rest to -1
      for (i = length; i < size; i++) {
        new_indices[i+1] = -1;
      }
      if (indices != NULL) {
        // We have an old cache to delete so save it for after we
        // drop the lock.
        to_dealloc_indices = indices;
      }
      release_set_methods_cached_itable_indices(indices = new_indices);
    }

    if (idnum_can_increment()) {
      // this cache can grow so we have to write to it safely
      indices[idnum+1] = index;
    }
  } else {
    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  }

  if (!idnum_can_increment()) {
    // The cache cannot grow and this JNI itable index value does not
    // have to be unique like a jmethodID. If there is a race to set it,
    // it doesn't matter.
    indices[idnum+1] = index;
  }

  if (to_dealloc_indices != NULL) {
    // we allocated a new cache so free the old one
    FreeHeap(to_dealloc_indices);
  }
}
// Retrieve a cached itable index
int instanceKlass::cached_itable_index(size_t idnum) {
  int* indices = methods_cached_itable_indices_acquire();
  if (indices != NULL && ((size_t)indices[0]) > idnum) {
    // indices exist and are long enough, retrieve the possible cached value
    return indices[idnum+1];
  }
  return -1;
}


//
// nmethodBucket is used to record dependent nmethods for
// deoptimization. nmethod dependencies are actually <klass, method>
// pairs but we really only care about the klass part for purposes of
// finding nmethods which might need to be deoptimized. Instead of
// recording the method, a count of how many times a particular nmethod
// was recorded is kept. This ensures that any recording errors are
// noticed since an nmethod should be removed as many times as it is
// added.
//
class nmethodBucket {
 private:
  nmethod* _nmethod;
  int _count;
  nmethodBucket* _next;

 public:
  nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
    _nmethod = nmethod;
    _next = next;
    _count = 1;
  }
  int count()                      { return _count; }
  int increment()                  { _count += 1; return _count; }
  int decrement()                  { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
  nmethodBucket* next()            { return _next; }
  void set_next(nmethodBucket* b)  { _next = b; }
  nmethod* get_nmethod()           { return _nmethod; }
};


//
// Walk the list of dependent nmethods searching for nmethods which
// are dependent on the klassOop that was passed in and mark them for
// deoptimization. Returns the number of nmethods found.
//
int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
  assert_locked_or_safepoint(CodeCache_lock);
  int found = 0;
  nmethodBucket* b = _dependencies;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    // since dependencies aren't removed until an nmethod becomes a zombie,
    // the dependency list may contain nmethods which aren't alive.
    if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
      if (TraceDependencies) {
        ResourceMark rm;
        tty->print_cr("Marked for deoptimization");
        tty->print_cr("  context = %s", this->external_name());
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
      nm->mark_for_deoptimization();
      found++;
    }
    b = b->next();
  }
  return found;
}


//
// Add an nmethodBucket to the list of dependencies for this nmethod.
// It's possible that an nmethod has multiple dependencies on this klass
// so a count is kept for each bucket to guarantee that creation and
// deletion of dependencies is consistent.
//
void instanceKlass::add_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = _dependencies;
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      b->increment();
      return;
    }
    b = b->next();
  }
  _dependencies = new nmethodBucket(nm, _dependencies);
}
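// Why the per-bucket count matters, in a sketch (expository): an nmethod nm
// may record several dependencies on the same klass, and each must be
// balanced by exactly one removal when nm goes away:
//
//   ik->add_dependent_nmethod(nm);     // bucket(nm) created, count == 1
//   ik->add_dependent_nmethod(nm);     // same bucket, count == 2
//   ik->remove_dependent_nmethod(nm);  // count == 1, bucket kept
//   ik->remove_dependent_nmethod(nm);  // count == 0, bucket unlinked/deleted
//
// An unmatched remove falls off the end of the list and trips the
// ShouldNotReachHere() in remove_dependent_nmethod() below.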
//
// Decrement count of the nmethod in the dependency list and remove
// the bucket completely when the count goes to 0. This method must
// find a corresponding bucket otherwise there's a bug in the
// recording of dependencies.
//
void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nmethodBucket* b = _dependencies;
  nmethodBucket* last = NULL;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      if (b->decrement() == 0) {
        if (last == NULL) {
          _dependencies = b->next();
        } else {
          last->set_next(b->next());
        }
        delete b;
      }
      return;
    }
    last = b;
    b = b->next();
  }
#ifdef ASSERT
  tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
  nm->print();
#endif // ASSERT
  ShouldNotReachHere();
}


#ifndef PRODUCT
void instanceKlass::print_dependent_nmethods(bool verbose) {
  nmethodBucket* b = _dependencies;
  int idx = 0;
  while (b != NULL) {
    nmethod* nm = b->get_nmethod();
    tty->print("[%d] count=%d { ", idx++, b->count());
    if (!verbose) {
      nm->print_on(tty, "nmethod");
      tty->print_cr(" } ");
    } else {
      nm->print();
      nm->print_dependencies();
      tty->print_cr("--- } ");
    }
    b = b->next();
  }
}


bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
  nmethodBucket* b = _dependencies;
  while (b != NULL) {
    if (nm == b->get_nmethod()) {
      return true;
    }
    b = b->next();
  }
  return false;
}
#endif //PRODUCT


#ifdef ASSERT
template <class T> void assert_is_in(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in(o), "should be in heap");
  }
}
template <class T> void assert_is_in_closed_subset(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
  }
}
template <class T> void assert_is_in_reserved(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
  }
}
template <class T> void assert_nothing(T *p) {}

#else
template <class T> void assert_is_in(T *p) {}
template <class T> void assert_is_in_closed_subset(T *p) {}
template <class T> void assert_is_in_reserved(T *p) {}
template <class T> void assert_nothing(T *p) {}
#endif // ASSERT

//
// Macros that iterate over areas of oops which are specialized on type of
// oop pointer either narrow or wide, depending on UseCompressedOops
//
// Parameters are:
//   T - type of oop to point to (either oop or narrowOop)
//   start_p - starting pointer for region to iterate over
//   count - number of oops or narrowOops to iterate over
//   do_oop - action to perform on each oop (it's arbitrary C code which
//            makes it more efficient to put in a macro rather than making
//            it a template function)
//   assert_fn - assert function which is template function because performance
//               doesn't matter when enabled.
#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
  T, start_p, count, do_oop,                   \
  assert_fn)                                   \
{                                              \
  T* p         = (T*)(start_p);                \
  T* const end = p + (count);                  \
  while (p < end) {                            \
    (assert_fn)(p);                            \
    do_oop;                                    \
    ++p;                                       \
  }                                            \
}

#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
  T, start_p, count, do_oop,                   \
  assert_fn)                                   \
{                                              \
  T* const start = (T*)(start_p);              \
  T*       p     = start + (count);            \
  while (start < p) {                          \
    --p;                                       \
    (assert_fn)(p);                            \
    do_oop;                                    \
  }                                            \
}

#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
  T, start_p, count, low, high,                \
  do_oop, assert_fn)                           \
{                                              \
  T* const l = (T*)(low);                      \
  T* const h = (T*)(high);                     \
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
         "bounded region must be properly aligned"); \
  T* p   = (T*)(start_p);                      \
  T* end = p + (count);                        \
  if (p < l) p = l;                            \
  if (end > h) end = h;                        \
  while (p < end) {                            \
    (assert_fn)(p);                            \
    do_oop;                                    \
    ++p;                                       \
  }                                            \
}
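// What an instantiation of the macro above comes out to, concretely
// (expository expansion; compare follow_static_fields() further below):
//
//   InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, start, cnt,
//                                         MarkSweep::mark_and_push(p),
//                                         assert_is_in_closed_subset)
//
// expands to roughly
//
//   {
//     narrowOop* p         = (narrowOop*)(start);
//     narrowOop* const end = p + (cnt);
//     while (p < end) {
//       (assert_is_in_closed_subset)(p);
//       MarkSweep::mark_and_push(p);   // do_oop sees the loop variable p
//       ++p;
//     }
//   }
//
// i.e. do_oop is spliced in verbatim, which is why callers write it in
// terms of a variable named p.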

//
// Macros that iterate over ranges of oops, specialized on the oop
// pointer type (narrowOop or oop, depending on UseCompressedOops).
//
// Parameters are:
//   T         - type of oop to point to (either oop or narrowOop)
//   start_p   - starting pointer for region to iterate over
//   count     - number of oops or narrowOops to iterate over
//   do_oop    - action to perform on each oop (arbitrary C code, which
//               makes it more efficient to put in a macro rather than
//               making it a template function)
//   assert_fn - assert function, which is a template function because
//               performance doesn't matter when it is enabled.
//
#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
  T, start_p, count, do_oop,                   \
  assert_fn)                                   \
{                                              \
  T* p         = (T*)(start_p);                \
  T* const end = p + (count);                  \
  while (p < end) {                            \
    (assert_fn)(p);                            \
    do_oop;                                    \
    ++p;                                       \
  }                                            \
}

#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
  T, start_p, count, do_oop,                   \
  assert_fn)                                   \
{                                              \
  T* const start = (T*)(start_p);              \
  T*       p     = start + (count);            \
  while (start < p) {                          \
    --p;                                       \
    (assert_fn)(p);                            \
    do_oop;                                    \
  }                                            \
}

#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
  T, start_p, count, low, high,                \
  do_oop, assert_fn)                           \
{                                              \
  T* const l = (T*)(low);                      \
  T* const h = (T*)(high);                     \
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
         "bounded region must be properly aligned"); \
  T* p   = (T*)(start_p);                      \
  T* end = p + (count);                        \
  if (p < l)   p = l;                          \
  if (end > h) end = h;                        \
  while (p < end) {                            \
    (assert_fn)(p);                            \
    do_oop;                                    \
    ++p;                                       \
  }                                            \
}


// The following macros call the specialized macros, passing either oop
// or narrowOop as the specialization type, depending on the
// UseCompressedOops flag.
#define InstanceKlass_OOP_ITERATE(start_p, count,    \
                                  do_oop, assert_fn) \
{                                                    \
  if (UseCompressedOops) {                           \
    InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
      start_p, count,                                \
      do_oop, assert_fn)                             \
  } else {                                           \
    InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,       \
      start_p, count,                                \
      do_oop, assert_fn)                             \
  }                                                  \
}

#define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
                                          do_oop, assert_fn)         \
{                                                                    \
  if (UseCompressedOops) {                                           \
    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,         \
      start_p, count,                                                \
      low, high,                                                     \
      do_oop, assert_fn)                                             \
  } else {                                                           \
    InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,               \
      start_p, count,                                                \
      low, high,                                                     \
      do_oop, assert_fn)                                             \
  }                                                                  \
}
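
// Illustrative sketch (an editorial addition, not part of the original
// source): for a concrete do_oop action the specialized macro expands to
// a plain typed loop.  For example,
//
//   InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, base, n, f->do_oop(p), assert_nothing)
//
// behaves like:
//
//   {
//     oop* p         = (oop*)(base);
//     oop* const end = p + (n);
//     while (p < end) {
//       assert_nothing(p);
//       f->do_oop(p);
//       ++p;
//     }
//   }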

#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)         \
{                                                                     \
  /* Compute oopmap block range.  The common case                     \
     is nonstatic_oop_map_size == 1. */                               \
  OopMapBlock* map           = start_of_nonstatic_oop_maps();         \
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();       \
  if (UseCompressedOops) {                                            \
    while (map < end_map) {                                           \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),  \
        do_oop, assert_fn)                                            \
      ++map;                                                          \
    }                                                                 \
  } else {                                                            \
    while (map < end_map) {                                           \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                      \
        obj->obj_field_addr<oop>(map->offset()), map->count(),        \
        do_oop, assert_fn)                                            \
      ++map;                                                          \
    }                                                                 \
  }                                                                   \
}

#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
{                                                                     \
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();       \
  OopMapBlock* map = start_map + nonstatic_oop_map_count();           \
  if (UseCompressedOops) {                                            \
    while (start_map < map) {                                         \
      --map;                                                          \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,        \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),  \
        do_oop, assert_fn)                                            \
    }                                                                 \
  } else {                                                            \
    while (start_map < map) {                                         \
      --map;                                                          \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,              \
        obj->obj_field_addr<oop>(map->offset()), map->count(),        \
        do_oop, assert_fn)                                            \
    }                                                                 \
  }                                                                   \
}

#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \
                                              assert_fn)              \
{                                                                     \
  /* Compute oopmap block range.  The common case is                  \
     nonstatic_oop_map_size == 1, so we accept the                    \
     usually non-existent extra overhead of examining                 \
     all the maps. */                                                 \
  OopMapBlock* map           = start_of_nonstatic_oop_maps();         \
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();       \
  if (UseCompressedOops) {                                            \
    while (map < end_map) {                                           \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,        \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),  \
        low, high,                                                    \
        do_oop, assert_fn)                                            \
      ++map;                                                          \
    }                                                                 \
  } else {                                                            \
    while (map < end_map) {                                           \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,              \
        obj->obj_field_addr<oop>(map->offset()), map->count(),        \
        low, high,                                                    \
        do_oop, assert_fn)                                            \
      ++map;                                                          \
    }                                                                 \
  }                                                                   \
}

void instanceKlass::follow_static_fields() {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    MarkSweep::mark_and_push(p), \
    assert_is_in_closed_subset)
}

#ifndef SERIALGC
void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    PSParallelCompact::mark_and_push(cm, p), \
    assert_is_in)
}
#endif // SERIALGC

void instanceKlass::adjust_static_fields() {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    MarkSweep::adjust_pointer(p), \
    assert_nothing)
}

#ifndef SERIALGC
void instanceKlass::update_static_fields() {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    PSParallelCompact::adjust_pointer(p), \
    assert_nothing)
}

void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
  InstanceKlass_BOUNDED_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    beg_addr, end_addr, \
    PSParallelCompact::adjust_pointer(p), \
    assert_nothing)
}
#endif // SERIALGC
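
// Illustrative sketch (an editorial addition, not part of the original
// source): an OopMapBlock describes one contiguous run of oop fields in
// an instance as an (offset, count) pair, so the map-iterate macros above
// conceptually perform:
//
//   OopMapBlock* map = start_of_nonstatic_oop_maps();
//   OopMapBlock* const end_map = map + nonstatic_oop_map_count();
//   for (; map < end_map; ++map) {
//     // visit map->count() consecutive T slots starting at
//     // obj->obj_field_addr<T>(map->offset())
//   }
//
// with T fixed to narrowOop or oop, once per object, by UseCompressedOops.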

void instanceKlass::oop_follow_contents(oop obj) {
  assert(obj != NULL, "can't follow the content of NULL object");
  obj->follow_header();
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    MarkSweep::mark_and_push(p), \
    assert_is_in_closed_subset)
}

#ifndef SERIALGC
void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
                                        oop obj) {
  assert(obj != NULL, "can't follow the content of NULL object");
  obj->follow_header(cm);
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    PSParallelCompact::mark_and_push(cm, p), \
    assert_is_in)
}
#endif // SERIALGC

// The closure's do_header() method dictates whether the given closure
// should be applied to the klass ptr in the object header.

#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)             \
                                                                                  \
int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);   \
  /* header */                                                                    \
  if (closure->do_header()) {                                                     \
    obj->oop_iterate_header(closure);                                             \
  }                                                                               \
  InstanceKlass_OOP_MAP_ITERATE(                                                  \
    obj,                                                                          \
    SpecializationStats::                                                         \
      record_do_oop_call##nv_suffix(SpecializationStats::ik);                     \
    (closure)->do_oop##nv_suffix(p),                                              \
    assert_is_in_closed_subset)                                                   \
  return size_helper();                                                           \
}

#ifndef SERIALGC
#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)   \
                                                                                  \
int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj,                  \
                                                        OopClosureType* closure) { \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);   \
  /* header */                                                                    \
  if (closure->do_header()) {                                                     \
    obj->oop_iterate_header(closure);                                             \
  }                                                                               \
  /* instance variables */                                                        \
  InstanceKlass_OOP_MAP_REVERSE_ITERATE(                                          \
    obj,                                                                          \
    SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);  \
    (closure)->do_oop##nv_suffix(p),                                              \
    assert_is_in_closed_subset)                                                   \
  return size_helper();                                                           \
}
#endif // !SERIALGC

#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)           \
                                                                                  \
int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj,                        \
                                                  OopClosureType* closure,        \
                                                  MemRegion mr) {                 \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);   \
  if (closure->do_header()) {                                                     \
    obj->oop_iterate_header(closure, mr);                                         \
  }                                                                               \
  InstanceKlass_BOUNDED_OOP_MAP_ITERATE(                                          \
    obj, mr.start(), mr.end(),                                                    \
    (closure)->do_oop##nv_suffix(p),                                              \
    assert_is_in_closed_subset)                                                   \
  return size_helper();                                                           \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
#ifndef SERIALGC
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC
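
// Illustrative note (an editorial addition, not part of the original
// source): the ALL_OOP_OOP_ITERATE_CLOSURES_* macros apply the definition
// macros above once per closure type, stamping out one body per
// (closure, suffix) pair; the nv_suffix selects the dispatch flavor of
// the oop action, e.g. roughly
//
//   (closure)->do_oop_v(p)    // "_v"  : virtual dispatch
//   (closure)->do_oop_nv(p)   // "_nv" : statically bound, inlinable
//
// so each closure gets a specialized oop_oop_iterate* definition without
// hand-writing them.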

void instanceKlass::iterate_static_fields(OopClosure* closure) {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    closure->do_oop(p), \
    assert_is_in_reserved)
}

void instanceKlass::iterate_static_fields(OopClosure* closure,
                                          MemRegion mr) {
  InstanceKlass_BOUNDED_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    mr.start(), mr.end(), \
    (closure)->do_oop_v(p), \
    assert_is_in_closed_subset)
}

int instanceKlass::oop_adjust_pointers(oop obj) {
  int size = size_helper();
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    MarkSweep::adjust_pointer(p), \
    assert_is_in)
  obj->adjust_header();
  return size;
}

#ifndef SERIALGC
void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
    obj, \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_depth(p); \
    }, \
    assert_nothing)
}

int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
  InstanceKlass_OOP_MAP_ITERATE( \
    obj, \
    PSParallelCompact::adjust_pointer(p), \
    assert_nothing)
  return size_helper();
}

int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
                                       HeapWord* beg_addr, HeapWord* end_addr) {
  InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
    obj, beg_addr, end_addr, \
    PSParallelCompact::adjust_pointer(p), \
    assert_nothing)
  return size_helper();
}

void instanceKlass::push_static_fields(PSPromotionManager* pm) {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    if (PSScavenge::should_scavenge(p)) { \
      pm->claim_or_forward_depth(p); \
    }, \
    assert_nothing)
}

void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
  InstanceKlass_OOP_ITERATE( \
    start_of_static_fields(), static_oop_field_size(), \
    PSParallelCompact::adjust_pointer(p), \
    assert_is_in)
}
#endif // SERIALGC

// This klass is alive but the implementor link is not followed/updated.
// Subklass and sibling links are handled by Klass::follow_weak_klass_links.

void instanceKlass::follow_weak_klass_links(
  BoolObjectClosure* is_alive, OopClosure* keep_alive) {
  assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
  if (ClassUnloading) {
    for (int i = 0; i < implementors_limit; i++) {
      klassOop impl = _implementors[i];
      if (impl == NULL)  break;  // no more in the list
      if (!is_alive->do_object_b(impl)) {
        // remove this implementor from the list by overwriting it with the tail
        int lasti = --_nof_implementors;
        assert(lasti >= i && lasti < implementors_limit, "just checking");
        _implementors[i] = _implementors[lasti];
        _implementors[lasti] = NULL;
        --i;  // rerun the loop at this index
      }
    }
  } else {
    for (int i = 0; i < implementors_limit; i++) {
      keep_alive->do_oop(&adr_implementors()[i]);
    }
  }
  Klass::follow_weak_klass_links(is_alive, keep_alive);
}

void instanceKlass::remove_unshareable_info() {
  Klass::remove_unshareable_info();
  init_implementor();
}

static void clear_all_breakpoints(methodOop m) {
  m->clear_all_breakpoints();
}

void instanceKlass::release_C_heap_structures() {
  // Deallocate oop map cache
  if (_oop_map_cache != NULL) {
    delete _oop_map_cache;
    _oop_map_cache = NULL;
  }

  // Deallocate JNI identifiers for jfieldIDs
  JNIid::deallocate(jni_ids());
  set_jni_ids(NULL);

  jmethodID* jmeths = methods_jmethod_ids_acquire();
  if (jmeths != (jmethodID*)NULL) {
    release_set_methods_jmethod_ids(NULL);
    FreeHeap(jmeths);
  }

  int* indices = methods_cached_itable_indices_acquire();
  if (indices != (int*)NULL) {
    release_set_methods_cached_itable_indices(NULL);
    FreeHeap(indices);
  }

  // release dependencies
  nmethodBucket* b = _dependencies;
  _dependencies = NULL;
  while (b != NULL) {
    nmethodBucket* next = b->next();
    delete b;
    b = next;
  }

  // Deallocate breakpoint records
  if (breakpoints() != 0x0) {
    methods_do(clear_all_breakpoints);
    assert(breakpoints() == 0x0, "should have cleared breakpoints");
  }

  // deallocate information about previous versions
  if (_previous_versions != NULL) {
    for (int i = _previous_versions->length() - 1; i >= 0; i--) {
      PreviousVersionNode* pv_node = _previous_versions->at(i);
      delete pv_node;
    }
    delete _previous_versions;
    _previous_versions = NULL;
  }

  // deallocate the cached class file
  if (_cached_class_file_bytes != NULL) {
    os::free(_cached_class_file_bytes);
    _cached_class_file_bytes = NULL;
    _cached_class_file_len = 0;
  }

  // Decrement symbol reference counts associated with the unloaded class.
  if (_name != NULL) _name->decrement_refcount();
  // unreference the array name derived from this class name (arrays of an
  // unloaded class can't be referenced anymore).
  if (_array_name != NULL) _array_name->decrement_refcount();
  if (_source_file_name != NULL) _source_file_name->decrement_refcount();
  if (_source_debug_extension != NULL) _source_debug_extension->decrement_refcount();
  // walk the constant pool and decrement symbol reference counts
  _constants->unreference_symbols();
}
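
// Illustrative note (an editorial addition, not part of the original
// source): the jmethodID and itable-index caches above are freed with an
// unpublish-then-free pattern, e.g.
//
//   jmethodID* jmeths = methods_jmethod_ids_acquire();
//   if (jmeths != NULL) {
//     release_set_methods_jmethod_ids(NULL);  // unpublish with a release store
//     FreeHeap(jmeths);                       // then reclaim the memory
//   }
//
// The pointer is unpublished before the memory is reclaimed; at this
// point the class is being unloaded, so no new readers are expected.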

void instanceKlass::set_source_file_name(Symbol* n) {
  _source_file_name = n;
  if (_source_file_name != NULL) _source_file_name->increment_refcount();
}

void instanceKlass::set_source_debug_extension(Symbol* n) {
  _source_debug_extension = n;
  if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount();
}

const char* instanceKlass::signature_name() const {
  const char* src = (const char*) (name()->as_C_string());
  const int src_length = (int)strlen(src);
  char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
  int src_index = 0;
  int dest_index = 0;
  dest[dest_index++] = 'L';
  while (src_index < src_length) {
    dest[dest_index++] = src[src_index++];
  }
  dest[dest_index++] = ';';
  dest[dest_index] = '\0';
  return dest;
}
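
// Illustrative example (an editorial addition, not part of the original
// source): for a klass whose name() is "java/lang/String",
//
//   const char* sig = ik->signature_name();  // yields "Ljava/lang/String;"
//
// in a resource-area buffer (note the NEW_RESOURCE_ARRAY allocation above).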

// Different versions of is_same_class_package.
bool instanceKlass::is_same_class_package(klassOop class2) {
  klassOop class1 = as_klassOop();
  oop classloader1 = instanceKlass::cast(class1)->class_loader();
  Symbol* classname1 = Klass::cast(class1)->name();

  if (Klass::cast(class2)->oop_is_objArray()) {
    class2 = objArrayKlass::cast(class2)->bottom_klass();
  }
  oop classloader2;
  if (Klass::cast(class2)->oop_is_instance()) {
    classloader2 = instanceKlass::cast(class2)->class_loader();
  } else {
    assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
    classloader2 = NULL;
  }
  Symbol* classname2 = Klass::cast(class2)->name();

  return instanceKlass::is_same_class_package(classloader1, classname1,
                                              classloader2, classname2);
}

bool instanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) {
  klassOop class1 = as_klassOop();
  oop classloader1 = instanceKlass::cast(class1)->class_loader();
  Symbol* classname1 = Klass::cast(class1)->name();

  return instanceKlass::is_same_class_package(classloader1, classname1,
                                              classloader2, classname2);
}

// Return true if two classes are in the same package; class loader and
// class name information is enough to determine a class's package.
bool instanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1,
                                          oop class_loader2, Symbol* class_name2) {
  if (class_loader1 != class_loader2) {
    return false;
  } else if (class_name1 == class_name2) {
    return true;                // skip painful bytewise comparison
  } else {
    ResourceMark rm;

    // The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
    // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
    // Otherwise, we just compare jbyte values between the strings.
    const jbyte *name1 = class_name1->base();
    const jbyte *name2 = class_name2->base();

    const jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
    const jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');

    if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
      // One of the two doesn't have a package.  Only return true
      // if the other one also doesn't have a package.
      return last_slash1 == last_slash2;
    } else {
      // Skip over '['s
      if (*name1 == '[') {
        do {
          name1++;
        } while (*name1 == '[');
        if (*name1 != 'L') {
          // Something is terribly wrong.  Shouldn't be here.
          return false;
        }
      }
      if (*name2 == '[') {
        do {
          name2++;
        } while (*name2 == '[');
        if (*name2 != 'L') {
          // Something is terribly wrong.  Shouldn't be here.
          return false;
        }
      }

      // Check that the package part is identical
      int length1 = last_slash1 - name1;
      int length2 = last_slash2 - name2;

      return UTF8::equal(name1, length1, name2, length2);
    }
  }
}

// Returns true iff super_method can be overridden by a method in targetclassname.
// See JLS, 3rd edition, 8.4.6.1.
// Assumes a name-signature match.
// "this" is the instanceKlass of super_method, which must exist.
// Note that the instanceKlass of the method in targetclassname has not
// always been created yet.
bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
  // Private methods cannot be overridden
  if (super_method->is_private()) {
    return false;
  }
  // If the super method is accessible, then override
  if ((super_method->is_protected()) ||
      (super_method->is_public())) {
    return true;
  }
  // Package-private methods are not inherited outside of the package
  assert(super_method->is_package_private(), "must be package private");
  return(is_same_class_package(targetclassloader(), targetclassname));
}

/* defined for now in jvm.cpp, for historical reasons *--
klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
                                                     Symbol*& simple_name_result, TRAPS) {
  ...
}
*/
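
// Illustrative examples (an editorial addition, not part of the original
// source) for the three-argument is_same_class_package, assuming the same
// class loader on both sides (names shown as Symbol contents):
//
//   ("java/lang/String", "java/lang/Integer")  -> true   (package "java/lang")
//   ("java/lang/String", "java/util/List")     -> false  (different packages)
//   ("Foo", "Bar")                             -> true   (both in the unnamed package)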

// Tell if two classes have the same enclosing class (at the package level).
bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
                                                klassOop class2_oop, TRAPS) {
  if (class2_oop == class1->as_klassOop())          return true;
  if (!Klass::cast(class2_oop)->oop_is_instance())  return false;
  instanceKlassHandle class2(THREAD, class2_oop);

  // must be in same package before we try anything else
  if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
    return false;

  // As long as there is an outer1.getEnclosingClass,
  // shift the search outward.
  instanceKlassHandle outer1 = class1;
  for (;;) {
    // As we walk along, look for equalities between outer1 and class2.
    // Eventually, the walks will terminate as outer1 stops
    // at the top-level class around the original class.
    bool ignore_inner_is_member;
    klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
                                                    CHECK_false);
    if (next == NULL)  break;
    if (next == class2())  return true;
    outer1 = instanceKlassHandle(THREAD, next);
  }

  // Now do the same for class2.
  instanceKlassHandle outer2 = class2;
  for (;;) {
    bool ignore_inner_is_member;
    klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
                                                    CHECK_false);
    if (next == NULL)  break;
    // Might as well check the new outer against all available values.
    if (next == class1())  return true;
    if (next == outer1())  return true;
    outer2 = instanceKlassHandle(THREAD, next);
  }

  // If by this point we have not found an equality between the
  // two classes, we know they are in separate package members.
  return false;
}


jint instanceKlass::compute_modifier_flags(TRAPS) const {
  klassOop k = as_klassOop();
  jint access = access_flags().as_int();

  // But check if it happens to be a member class.
  typeArrayOop inner_class_list = inner_classes();
  int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
  assert(length % instanceKlass::inner_class_next_offset == 0, "just checking");
  if (length > 0) {
    typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
    instanceKlassHandle ik(THREAD, k);
    for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
      int ioff = inner_class_list_h->ushort_at(
                      i + instanceKlass::inner_class_inner_class_info_offset);

      // Inner class attribute can be zero, skip it.
      // Strange but true:  the JVM spec. allows null inner class refs.
      if (ioff == 0) continue;

      // only look at classes that are already loaded
      // since we are looking for the flags for ourselves.
      Symbol* inner_name = ik->constants()->klass_name_at(ioff);
      if ((ik->name() == inner_name)) {
        // This is really a member class.
        access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
        break;
      }
    }
  }
  // Remember to strip the ACC_SUPER bit
  return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
}

jint instanceKlass::jvmti_class_status() const {
  jint result = 0;

  if (is_linked()) {
    result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
  }

  if (is_initialized()) {
    assert(is_linked(), "Class status is not consistent");
    result |= JVMTI_CLASS_STATUS_INITIALIZED;
  }
  if (is_in_error_state()) {
    result |= JVMTI_CLASS_STATUS_ERROR;
  }
  return result;
}

methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
  itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
  int method_table_offset_in_words = ioe->offset()/wordSize;
  int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
                       / itableOffsetEntry::size();

  for (int cnt = 0; ; cnt++, ioe++) {
    // If the interface isn't implemented by the receiver class,
    // the VM should throw IncompatibleClassChangeError.
    if (cnt >= nof_interfaces) {
      THROW_0(vmSymbols::java_lang_IncompatibleClassChangeError());
    }

    klassOop ik = ioe->interface_klass();
    if (ik == holder) break;
  }

  itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
  methodOop m = ime[index].method();
  if (m == NULL) {
    THROW_0(vmSymbols::java_lang_AbstractMethodError());
  }
  return m;
}
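
// Illustrative sketch (an editorial addition, not part of the original
// source): possible outcomes of method_at_itable for a receiver klass R,
// interface klass I and itable slot index i:
//
//   methodOop m = R->method_at_itable(I, i, CHECK_NULL);
//   // R does not implement I        -> IncompatibleClassChangeError
//   // slot i has no implementation  -> AbstractMethodError
//   // otherwise                     -> the implementing methodOop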

// On-stack replacement stuff
void instanceKlass::add_osr_nmethod(nmethod* n) {
  // only one compilation can be active
  NEEDS_CLEANUP
  // This is a short non-blocking critical region, so the no-safepoint check is ok.
  OsrList_lock->lock_without_safepoint_check();
  assert(n->is_osr_method(), "wrong kind of nmethod");
  n->set_osr_link(osr_nmethods_head());
  set_osr_nmethods_head(n);
  // Raise the highest osr level if necessary
  if (TieredCompilation) {
    methodOop m = n->method();
    m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
  }
  // Remember to unlock again
  OsrList_lock->unlock();

  // Get rid of the osr methods for the same bci that have lower levels.
  if (TieredCompilation) {
    for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
      nmethod* inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
      if (inv != NULL && inv->is_in_use()) {
        inv->make_not_entrant();
      }
    }
  }
}


void instanceKlass::remove_osr_nmethod(nmethod* n) {
  // This is a short non-blocking critical region, so the no-safepoint check is ok.
  OsrList_lock->lock_without_safepoint_check();
  assert(n->is_osr_method(), "wrong kind of nmethod");
  nmethod* last = NULL;
  nmethod* cur  = osr_nmethods_head();
  int max_level = CompLevel_none;  // Find the max comp level excluding n
  methodOop m = n->method();
  // Search for match
  while (cur != NULL && cur != n) {
    if (TieredCompilation) {
      // Find max level before n
      max_level = MAX2(max_level, cur->comp_level());
    }
    last = cur;
    cur = cur->osr_link();
  }
  nmethod* next = NULL;
  if (cur == n) {
    next = cur->osr_link();
    if (last == NULL) {
      // Remove first element
      set_osr_nmethods_head(next);
    } else {
      last->set_osr_link(next);
    }
  }
  n->set_osr_link(NULL);
  if (TieredCompilation) {
    cur = next;
    while (cur != NULL) {
      // Find max level after n
      max_level = MAX2(max_level, cur->comp_level());
      cur = cur->osr_link();
    }
    m->set_highest_osr_comp_level(max_level);
  }
  // Remember to unlock again
  OsrList_lock->unlock();
}
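
// Illustrative note (an editorial addition, not part of the original
// source): with TieredCompilation, add_osr_nmethod raises the method's
// highest OSR level via MAX2, while remove_osr_nmethod recomputes it as
// the maximum comp_level over the nmethods remaining on this klass's OSR
// list, keeping methodOop::highest_osr_comp_level() in step with the list.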

nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
  // This is a short non-blocking critical region, so the no-safepoint check is ok.
  OsrList_lock->lock_without_safepoint_check();
  nmethod* osr  = osr_nmethods_head();
  nmethod* best = NULL;
  while (osr != NULL) {
    assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
    // There can be a time when a c1 osr method exists but we are waiting
    // for a c2 version. When c2 completes its osr nmethod we will trash
    // the c1 version and only be able to find the c2 version. However,
    // while we overflow in the c1 code at back branches, we don't want to
    // try to switch to the same code as we are already running.

    if (osr->method() == m &&
        (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
      if (match_level) {
        if (osr->comp_level() == comp_level) {
          // Found a match - return it.
          OsrList_lock->unlock();
          return osr;
        }
      } else {
        if (best == NULL || (osr->comp_level() > best->comp_level())) {
          if (osr->comp_level() == CompLevel_highest_tier) {
            // Found the best possible - return it.
            OsrList_lock->unlock();
            return osr;
          }
          best = osr;
        }
      }
    }
    osr = osr->osr_link();
  }
  OsrList_lock->unlock();
  if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
    return best;
  }
  return NULL;
}
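
// Illustrative note (an editorial addition, not part of the original
// source): lookup semantics of the function above.
//
//   lookup_osr_nmethod(m, bci, level, true  /* match_level */)
//     -> an OSR nmethod for m at bci compiled at exactly 'level', or NULL
//   lookup_osr_nmethod(m, bci, level, false /* match_level */)
//     -> the highest-level OSR nmethod for m at bci whose level is at
//        least 'level', or NULL
//
// A bci of InvocationEntryBci matches any OSR entry bci.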

// -----------------------------------------------------------------------------------------------------
#ifndef PRODUCT

// Printing

#define BULLET  " - "

void FieldPrinter::do_field(fieldDescriptor* fd) {
  _st->print(BULLET);
  if (fd->is_static() || (_obj == NULL)) {
    fd->print_on(_st);
    _st->cr();
  } else {
    fd->print_on_for(_st, _obj);
    _st->cr();
  }
}


void instanceKlass::oop_print_on(oop obj, outputStream* st) {
  Klass::oop_print_on(obj, st);

  if (as_klassOop() == SystemDictionary::String_klass()) {
    typeArrayOop value  = java_lang_String::value(obj);
    juint        offset = java_lang_String::offset(obj);
    juint        length = java_lang_String::length(obj);
    if (value != NULL &&
        value->is_typeArray() &&
        offset          <= (juint) value->length() &&
        offset + length <= (juint) value->length()) {
      st->print(BULLET"string: ");
      Handle h_obj(obj);
      java_lang_String::print(h_obj, st);
      st->cr();
      if (!WizardMode)  return;  // that is enough
    }
  }

  st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
  FieldPrinter print_nonstatic_field(st, obj);
  do_nonstatic_fields(&print_nonstatic_field);

  if (as_klassOop() == SystemDictionary::Class_klass()) {
    st->print(BULLET"signature: ");
    java_lang_Class::print_signature(obj, st);
    st->cr();
    klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
    st->print(BULLET"fake entry for mirror: ");
    mirrored_klass->print_value_on(st);
    st->cr();
    st->print(BULLET"fake entry resolved_constructor: ");
    methodOop ctor = java_lang_Class::resolved_constructor(obj);
    ctor->print_value_on(st);
    klassOop array_klass = java_lang_Class::array_klass(obj);
    st->cr();
    st->print(BULLET"fake entry for array: ");
    array_klass->print_value_on(st);
    st->cr();
  } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
    st->print(BULLET"signature: ");
    java_dyn_MethodType::print_signature(obj, st);
    st->cr();
  }
}

#endif //PRODUCT

void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
  st->print("a ");
  name()->print_value_on(st);
  obj->print_address_on(st);
  if (as_klassOop() == SystemDictionary::String_klass()
      && java_lang_String::value(obj) != NULL) {
    ResourceMark rm;
    int len = java_lang_String::length(obj);
    int plen = (len < 24 ? len : 12);
    char* str = java_lang_String::as_utf8_string(obj, 0, plen);
    st->print(" = \"%s\"", str);
    if (len > plen)
      st->print("...[%d]", len);
  } else if (as_klassOop() == SystemDictionary::Class_klass()) {
    klassOop k = java_lang_Class::as_klassOop(obj);
    st->print(" = ");
    if (k != NULL) {
      k->print_value_on(st);
    } else {
      const char* tname = type2name(java_lang_Class::primitive_type(obj));
      st->print("%s", tname ? tname : "type?");
    }
  } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
    st->print(" = ");
    java_dyn_MethodType::print_signature(obj, st);
  } else if (java_lang_boxing_object::is_instance(obj)) {
    st->print(" = ");
    java_lang_boxing_object::print(obj, st);
  }
}

const char* instanceKlass::internal_name() const {
  return external_name();
}

// Verification

class VerifyFieldClosure: public OopClosure {
 protected:
  template <class T> void do_oop_work(T* p) {
    guarantee(Universe::heap()->is_in_closed_subset(p), "should be in closed subset of heap");
    oop obj = oopDesc::load_decode_heap_oop(p);
    if (!obj->is_oop_or_null()) {
      tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
      Universe::print();
      guarantee(false, "boom");
    }
  }
 public:
  virtual void do_oop(oop* p)       { VerifyFieldClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
};

void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
  Klass::oop_verify_on(obj, st);
  VerifyFieldClosure blk;
  oop_oop_iterate(obj, &blk);
}

#ifndef PRODUCT

void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
  // This verification code is disabled.  JDK_Version::is_gte_jdk14x_version()
  // cannot be called, since this function runs before the VM is able to
  // determine which JDK version it is running under.
  // The check below is always false since 1.4.
  return;

  // This verification code is temporarily disabled for the 1.4
  // reflection implementation, since java.lang.Class now has
  // Java-level instance fields.  Should rewrite this to handle that
  // case.
  if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
    // Verify that java.lang.Class instances have a fake oop field added.
    instanceKlass* ik = instanceKlass::cast(k);

    // Check that we have the right class
    static bool first_time = true;
    guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
    first_time = false;
    const int extra = java_lang_Class::number_of_fake_oop_fields;
    guarantee(ik->nonstatic_field_size() == extra, "just checking");
    guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
    guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");

    // Check that the map is (2,extra)
    int offset = java_lang_Class::klass_offset;

    OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
    guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
              "sanity");
  }
}

#endif // ndef PRODUCT

// JNIid class for jfieldIDs only
// Note to reviewers:
// These JNI functions are just moved over to column 1 and not changed
// in the compressed oops workspace.
JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
  _holder = holder;
  _offset = offset;
  _next   = next;
  debug_only(_is_static_field_id = false;)
}


JNIid* JNIid::find(int offset) {
  JNIid* current = this;
  while (current != NULL) {
    if (current->offset() == offset)  return current;
    current = current->next();
  }
  return NULL;
}

void JNIid::oops_do(OopClosure* f) {
  for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
    f->do_oop(cur->holder_addr());
  }
}

void JNIid::deallocate(JNIid* current) {
  while (current != NULL) {
    JNIid* next = current->next();
    delete current;
    current = next;
  }
}


void JNIid::verify(klassOop holder) {
  int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
  int end_field_offset   = first_field_offset +
                           (instanceKlass::cast(holder)->static_field_size() * wordSize);

  JNIid* current = this;
  while (current != NULL) {
    guarantee(current->holder() == holder, "Invalid klass in JNIid");
#ifdef ASSERT
    int o = current->offset();
    if (current->is_static_field_id()) {
      guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
    }
#endif
    current = current->next();
  }
}


#ifdef ASSERT
void instanceKlass::set_init_state(ClassState state) {
  bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
                                               : (_init_state < state);
  assert(good_state || state == allocated, "illegal state transition");
  _init_state = state;
}
#endif
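
// Illustrative note (an editorial addition, not part of the original
// source): set_init_state asserts monotonic forward progress through the
// ClassState values declared in instanceKlass.hpp (e.g. allocated ->
// loaded -> linked -> being_initialized -> fully_initialized or
// initialization_error); shared classes may also re-assert their current
// state, hence the <= comparison.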


// RedefineClasses() support for previous versions:

// Add an information node that contains weak references to the
// interesting parts of the previous version of the_class.
// This is also where we clean out any unused weak references.
// Note that while we delete nodes from the _previous_versions
// array, we never delete the array itself until the klass is
// unloaded. The has_been_redefined() query depends on that fact.
//
void instanceKlass::add_previous_version(instanceKlassHandle ikh,
                                         BitMap* emcp_methods, int emcp_method_count) {
  assert(Thread::current()->is_VM_thread(),
         "only VMThread can add previous versions");

  if (_previous_versions == NULL) {
    // This is the first previous version so make some space.
    // Start with 2 elements under the assumption that the class
    // won't be redefined much.
    _previous_versions = new (ResourceObj::C_HEAP)
                             GrowableArray<PreviousVersionNode *>(2, true);
  }

  // RC_TRACE macro has an embedded ResourceMark
  RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
    ikh->external_name(), _previous_versions->length(), emcp_method_count));
  constantPoolHandle cp_h(ikh->constants());
  jobject cp_ref;
  if (cp_h->is_shared()) {
    // a shared ConstantPool requires a regular reference; a weak
    // reference would be collectible
    cp_ref = JNIHandles::make_global(cp_h);
  } else {
    cp_ref = JNIHandles::make_weak_global(cp_h);
  }
  PreviousVersionNode* pv_node = NULL;
  objArrayOop old_methods = ikh->methods();

  if (emcp_method_count == 0) {
    // non-shared ConstantPool gets a weak reference
    pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
    RC_TRACE(0x00000400,
      ("add: all methods are obsolete; flushing any EMCP weak refs"));
  } else {
    int local_count = 0;
    GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
                                            GrowableArray<jweak>(emcp_method_count, true);
    for (int i = 0; i < old_methods->length(); i++) {
      if (emcp_methods->at(i)) {
        // this old method is EMCP so save a weak ref
        methodOop old_method = (methodOop) old_methods->obj_at(i);
        methodHandle old_method_h(old_method);
        jweak method_ref = JNIHandles::make_weak_global(old_method_h);
        method_refs->append(method_ref);
        if (++local_count >= emcp_method_count) {
          // no more EMCP methods so bail out now
          break;
        }
      }
    }
    // non-shared ConstantPool gets a weak reference
    pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
  }

  _previous_versions->append(pv_node);

  // Using weak references allows the interesting parts of previous
  // classes to be GC'ed when they are no longer needed. Since the
  // caller is the VMThread and we are at a safepoint, this is a good
  // time to clear out unused weak references.

  RC_TRACE(0x00000400, ("add: previous version length=%d",
    _previous_versions->length()));

  // skip the last entry since we just added it
  for (int i = _previous_versions->length() - 2; i >= 0; i--) {
    // check the previous versions array for GC'ed weak refs
    pv_node = _previous_versions->at(i);
    cp_ref = pv_node->prev_constant_pool();
    assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
    if (cp_ref == NULL) {
      delete pv_node;
      _previous_versions->remove_at(i);
      // Since we are traversing the array backwards, we don't have to
      // do anything special with the index.
      continue;  // robustness
    }

    constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
    if (cp == NULL) {
      // this entry has been GC'ed so remove it
      delete pv_node;
      _previous_versions->remove_at(i);
      // Since we are traversing the array backwards, we don't have to
      // do anything special with the index.
      continue;
    } else {
      RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
    }

    GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
    if (method_refs != NULL) {
      RC_TRACE(0x00000400, ("add: previous methods length=%d",
        method_refs->length()));
      for (int j = method_refs->length() - 1; j >= 0; j--) {
        jweak method_ref = method_refs->at(j);
        assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
        if (method_ref == NULL) {
          method_refs->remove_at(j);
          // Since we are traversing the array backwards, we don't have to
          // do anything special with the index.
          continue;  // robustness
        }

        methodOop method = (methodOop)JNIHandles::resolve(method_ref);
        if (method == NULL || emcp_method_count == 0) {
          // This method entry has been GC'ed or the current
          // RedefineClasses() call has made all methods obsolete
          // so remove it.
          JNIHandles::destroy_weak_global(method_ref);
          method_refs->remove_at(j);
        } else {
          // RC_TRACE macro has an embedded ResourceMark
          RC_TRACE(0x00000400,
            ("add: %s(%s): previous method @%d in version @%d is alive",
            method->name()->as_C_string(), method->signature()->as_C_string(),
            j, i));
        }
      }
    }
  }

  int obsolete_method_count = old_methods->length() - emcp_method_count;

  if (emcp_method_count != 0 && obsolete_method_count != 0 &&
      _previous_versions->length() > 1) {
    // We have a mix of obsolete and EMCP methods. If there is more
    // than the previous version that we just added, then we have to
    // clear out any matching EMCP method entries the hard way.
    int local_count = 0;
    for (int i = 0; i < old_methods->length(); i++) {
      if (!emcp_methods->at(i)) {
        // only obsolete methods are interesting
        methodOop old_method = (methodOop) old_methods->obj_at(i);
        Symbol* m_name = old_method->name();
        Symbol* m_signature = old_method->signature();

        // skip the last entry since we just added it
        for (int j = _previous_versions->length() - 2; j >= 0; j--) {
          // check the previous versions array for GC'ed weak refs
          pv_node = _previous_versions->at(j);
          cp_ref = pv_node->prev_constant_pool();
          assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
          if (cp_ref == NULL) {
            delete pv_node;
            _previous_versions->remove_at(j);
            // Since we are traversing the array backwards, we don't have to
            // do anything special with the index.
            continue;  // robustness
          }

          constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
          if (cp == NULL) {
            // this entry has been GC'ed so remove it
            delete pv_node;
            _previous_versions->remove_at(j);
            // Since we are traversing the array backwards, we don't have to
            // do anything special with the index.
            continue;
          }

          GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
          if (method_refs == NULL) {
            // We have run into a PreviousVersion generation where
            // all methods were made obsolete during that generation's
            // RedefineClasses() operation. At the time of that
            // operation, all EMCP methods were flushed so we don't
            // have to go back any further.
            //
            // A NULL method_refs is different from an empty method_refs.
            // We cannot infer any optimizations about older generations
            // from an empty method_refs for the current generation.
            break;
          }

          for (int k = method_refs->length() - 1; k >= 0; k--) {
            jweak method_ref = method_refs->at(k);
            assert(method_ref != NULL,
                   "weak method ref was unexpectedly cleared");
            if (method_ref == NULL) {
              method_refs->remove_at(k);
              // Since we are traversing the array backwards, we don't
              // have to do anything special with the index.
              continue;  // robustness
            }

            methodOop method = (methodOop)JNIHandles::resolve(method_ref);
            if (method == NULL) {
              // this method entry has been GC'ed so skip it
              JNIHandles::destroy_weak_global(method_ref);
              method_refs->remove_at(k);
              continue;
            }

            if (method->name() == m_name &&
                method->signature() == m_signature) {
              // The current RedefineClasses() call has made all EMCP
              // versions of this method obsolete so mark it as obsolete
              // and remove the weak ref.
              RC_TRACE(0x00000400,
                ("add: %s(%s): flush obsolete method @%d in version @%d",
                m_name->as_C_string(), m_signature->as_C_string(), k, j));

              method->set_is_obsolete();
              JNIHandles::destroy_weak_global(method_ref);
              method_refs->remove_at(k);
              break;
            }
          }

          // The previous loop may not find a matching EMCP method, but
          // that doesn't mean that we can optimize and not go any
          // further back in the PreviousVersion generations. The EMCP
          // method for this generation could have already been GC'ed,
          // but there still may be an older EMCP method that has not
          // been GC'ed.
        }

        if (++local_count >= obsolete_method_count) {
          // no more obsolete methods so bail out now
          break;
        }
      }
    }
  }
} // end add_previous_version()


// Determine if instanceKlass has a previous version.
bool instanceKlass::has_previous_version() const {
  if (_previous_versions == NULL) {
    // no previous versions array so answer is easy
    return false;
  }

  for (int i = _previous_versions->length() - 1; i >= 0; i--) {
    // Check the previous versions array for an info node that hasn't
    // been GC'ed
    PreviousVersionNode* pv_node = _previous_versions->at(i);

    jobject cp_ref = pv_node->prev_constant_pool();
    assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
    if (cp_ref == NULL) {
      continue;  // robustness
    }

    constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
    if (cp != NULL) {
      // we have at least one previous version
      return true;
    }

    // We don't have to check the method refs. If the constant pool has
    // been GC'ed then so have the methods.
  }

  // all of the underlying nodes' info has been GC'ed
  return false;
} // end has_previous_version()

methodOop instanceKlass::method_with_idnum(int idnum) {
  methodOop m = NULL;
  if (idnum < methods()->length()) {
    m = (methodOop) methods()->obj_at(idnum);
  }
  if (m == NULL || m->method_idnum() != idnum) {
    for (int index = 0; index < methods()->length(); ++index) {
      m = (methodOop) methods()->obj_at(index);
      if (m->method_idnum() == idnum) {
        return m;
      }
    }
  }
  return m;
}
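
// Illustrative note (an editorial addition, not part of the original
// source): method_with_idnum first tries methods()->obj_at(idnum) as a
// fast path (an idnum usually matches its array index) and falls back to
// a linear scan, since e.g. class redefinition can change a method's
// position in the array while its idnum stays stable.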


// Set the annotation at 'idnum' to 'anno'.
// We don't want to create or extend the array if 'anno' is NULL, since that is the
// default value.  However, if the array exists and is long enough, we must set NULL values.
void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
  objArrayOop md = *md_p;
  if (md != NULL && md->length() > idnum) {
    md->obj_at_put(idnum, anno);
  } else if (anno != NULL) {
    // create the array
    int length = MAX2(idnum+1, (int)_idnum_allocated_count);
    md = oopFactory::new_system_objArray(length, Thread::current());
    if (*md_p != NULL) {
      // copy the existing entries
      for (int index = 0; index < (*md_p)->length(); index++) {
        md->obj_at_put(index, (*md_p)->obj_at(index));
      }
    }
    set_annotations(md, md_p);
    md->obj_at_put(idnum, anno);
  } // if no array and idnum isn't included there is nothing to do
}

// Construct a PreviousVersionNode entry for the array hung off
// the instanceKlass.
PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
                                         bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {

  _prev_constant_pool = prev_constant_pool;
  _prev_cp_is_weak    = prev_cp_is_weak;
  _prev_EMCP_methods  = prev_EMCP_methods;
}


// Destroy a PreviousVersionNode
PreviousVersionNode::~PreviousVersionNode() {
  if (_prev_constant_pool != NULL) {
    if (_prev_cp_is_weak) {
      JNIHandles::destroy_weak_global(_prev_constant_pool);
    } else {
      JNIHandles::destroy_global(_prev_constant_pool);
    }
  }

  if (_prev_EMCP_methods != NULL) {
    for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
      jweak method_ref = _prev_EMCP_methods->at(i);
      if (method_ref != NULL) {
        JNIHandles::destroy_weak_global(method_ref);
      }
    }
    delete _prev_EMCP_methods;
  }
}


// Construct a PreviousVersionInfo entry
PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
  _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
  _prev_EMCP_method_handles = NULL;

  jobject cp_ref = pv_node->prev_constant_pool();
  assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
  if (cp_ref == NULL) {
    return;  // robustness
  }

  constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
  if (cp == NULL) {
    // Weak reference has been GC'ed. Since the constant pool has been
    // GC'ed, the methods have also been GC'ed.
    return;
  }

  // make the constantPoolOop safe to return
  _prev_constant_pool_handle = constantPoolHandle(cp);

  GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
  if (method_refs == NULL) {
    // the instanceKlass did not have any EMCP methods
    return;
  }

  _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);

  int n_methods = method_refs->length();
  for (int i = 0; i < n_methods; i++) {
    jweak method_ref = method_refs->at(i);
    assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
    if (method_ref == NULL) {
      continue;  // robustness
    }

    methodOop method = (methodOop)JNIHandles::resolve(method_ref);
    if (method == NULL) {
      // this entry has been GC'ed so skip it
      continue;
    }

    // make the methodOop safe to return
    _prev_EMCP_method_handles->append(methodHandle(method));
  }
}


// Destroy a PreviousVersionInfo
PreviousVersionInfo::~PreviousVersionInfo() {
  // Since _prev_EMCP_method_handles is not C-heap allocated, we
  // don't have to delete it.
}


// Construct a helper for walking the previous versions array
PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
  _previous_versions = ik->previous_versions();
  _current_index = 0;
  // _hm needs no initialization
  _current_p = NULL;
}


// Destroy a PreviousVersionWalker
PreviousVersionWalker::~PreviousVersionWalker() {
  // Delete the current info just in case the caller didn't walk to
  // the end of the previous versions list. No harm if _current_p is
  // already NULL.
  delete _current_p;

  // When _hm is destroyed, all the Handles returned in
  // PreviousVersionInfo objects will be destroyed.
  // Also, after this destructor is finished it will be
  // safe to delete the GrowableArray allocated in the
  // PreviousVersionInfo objects.
}


// Return the interesting information for the next previous version
// of the klass. Returns NULL if there are no more previous versions.
PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
  if (_previous_versions == NULL) {
    // no previous versions so nothing to return
    return NULL;
  }

  delete _current_p;  // cleanup the previous info for the caller
  _current_p = NULL;  // reset to NULL so we don't delete the same object twice

  int length = _previous_versions->length();

  while (_current_index < length) {
    PreviousVersionNode* pv_node = _previous_versions->at(_current_index++);
    PreviousVersionInfo* pv_info = new (ResourceObj::C_HEAP)
                                       PreviousVersionInfo(pv_node);

    constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
    if (cp_h.is_null()) {
      delete pv_info;

      // The underlying node's info has been GC'ed so try the next one.
      // We don't have to check the methods. If the constant pool has
      // been GC'ed then so have the methods.
      continue;
    }

    // Found a node with non-GC'ed info so return it. The caller will
    // need to delete pv_info when they are done with it.
    _current_p = pv_info;
    return pv_info;
  }

  // all of the underlying nodes' info has been GC'ed
  return NULL;
} // end next_previous_version()
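
// Illustrative usage sketch (an editorial addition, not part of the
// original source): walking the previous versions of an instanceKlass,
// assuming the caller has set up the required ResourceMark:
//
//   PreviousVersionWalker pvw(ik);
//   for (PreviousVersionInfo* pv_info = pvw.next_previous_version();
//        pv_info != NULL; pv_info = pvw.next_previous_version()) {
//     constantPoolHandle prev_cp = pv_info->prev_constant_pool_handle();
//     GrowableArray<methodHandle>* emcp = pv_info->prev_EMCP_method_handles();
//     // ... inspect the previous version; pv_info is owned by the walker.
//   }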