oopMapCache.cpp revision 1472:c18cbe5936b8
/*
 * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
2218334Speter * 2318334Speter */ 24169689Skan 25169689Skan# include "incls/_precompiled.incl" 26169689Skan# include "incls/_oopMapCache.cpp.incl" 27169689Skan 28169689Skanclass OopMapCacheEntry: private InterpreterOopMap { 29169689Skan friend class InterpreterOopMap; 30169689Skan friend class OopMapForCacheEntry; 31169689Skan friend class OopMapCache; 3218334Speter friend class VerifyClosure; 33169689Skan 34169689Skan protected: 35169689Skan // Initialization 36169689Skan void fill(methodHandle method, int bci); 37169689Skan // fills the bit mask for native calls 3818334Speter void fill_for_native(methodHandle method); 3918334Speter void set_mask(CellTypeState* vars, CellTypeState* stack, int stack_top); 4018334Speter 4118334Speter // Deallocate bit masks and initialize fields 4218334Speter void flush(); 43169689Skan 4418334Speter private: 4518334Speter void allocate_bit_mask(); // allocates the bit mask on C heap f necessary 4618334Speter void deallocate_bit_mask(); // allocates the bit mask on C heap f necessary 4718334Speter bool verify_mask(CellTypeState *vars, CellTypeState *stack, int max_locals, int stack_top); 4818334Speter 49169689Skan public: 5018334Speter OopMapCacheEntry() : InterpreterOopMap() { 5118334Speter#ifdef ASSERT 5218334Speter _resource_allocate_bit_mask = false; 5318334Speter#endif 5418334Speter } 5518334Speter}; 56169689Skan 5718334Speter 5818334Speter// Implementation of OopMapForCacheEntry 59169689Skan// (subclass of GenerateOopMap, initializes an OopMapCacheEntry for a given method and bci) 6018334Speter 6118334Speterclass OopMapForCacheEntry: public GenerateOopMap { 6218334Speter OopMapCacheEntry *_entry; 6318334Speter int _bci; 6418334Speter int _stack_top; 6518334Speter 6618334Speter virtual bool report_results() const { return false; } 6718334Speter virtual bool possible_gc_point (BytecodeStream *bcs); 6818334Speter virtual void fill_stackmap_prolog (int nof_gc_points); 6918334Speter virtual void fill_stackmap_epilog (); 7018334Speter 
virtual void fill_stackmap_for_opcodes (BytecodeStream *bcs, 7118334Speter CellTypeState* vars, 7218334Speter CellTypeState* stack, 7318334Speter int stack_top); 7418334Speter virtual void fill_init_vars (GrowableArray<intptr_t> *init_vars); 75117395Skan 7618334Speter public: 77169689Skan OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry *entry); 78117395Skan 7990075Sobrien // Computes stack map for (method,bci) and initialize entry 80169689Skan void compute_map(TRAPS); 8118334Speter int size(); 8218334Speter}; 8318334Speter 8418334Speter 8518334SpeterOopMapForCacheEntry::OopMapForCacheEntry(methodHandle method, int bci, OopMapCacheEntry* entry) : GenerateOopMap(method) { 8618334Speter _bci = bci; 8718334Speter _entry = entry; 8818334Speter _stack_top = -1; 8918334Speter} 9018334Speter 9118334Speter 9218334Spetervoid OopMapForCacheEntry::compute_map(TRAPS) { 9318334Speter assert(!method()->is_native(), "cannot compute oop map for native methods"); 9418334Speter // First check if it is a method where the stackmap is always empty 9552284Sobrien if (method()->code_size() == 0 || method()->max_locals() + method()->max_stack() == 0) { 9652284Sobrien _entry->set_mask_size(0); 9752284Sobrien } else { 9852284Sobrien ResourceMark rm; 9952284Sobrien GenerateOopMap::compute_map(CATCH); 10052284Sobrien result_for_basicblock(_bci); 10152284Sobrien } 10252284Sobrien} 10318334Speter 10418334Speter 10518334Speterbool OopMapForCacheEntry::possible_gc_point(BytecodeStream *bcs) { 10618334Speter return false; // We are not reporting any result. 
We call result_for_basicblock directly 10718334Speter} 10818334Speter 10918334Speter 11018334Spetervoid OopMapForCacheEntry::fill_stackmap_prolog(int nof_gc_points) { 11118334Speter // Do nothing 11218334Speter} 11318334Speter 11418334Speter 11518334Spetervoid OopMapForCacheEntry::fill_stackmap_epilog() { 11618334Speter // Do nothing 11718334Speter} 11818334Speter 11918334Speter 12018334Spetervoid OopMapForCacheEntry::fill_init_vars(GrowableArray<intptr_t> *init_vars) { 12118334Speter // Do nothing 12218334Speter} 12318334Speter 12418334Speter 125169689Skanvoid OopMapForCacheEntry::fill_stackmap_for_opcodes(BytecodeStream *bcs, 126169689Skan CellTypeState* vars, 127169689Skan CellTypeState* stack, 128169689Skan int stack_top) { 129169689Skan // Only interested in one specific bci 130169689Skan if (bcs->bci() == _bci) { 131169689Skan _entry->set_mask(vars, stack, stack_top); 132169689Skan _stack_top = stack_top; 133169689Skan } 134169689Skan} 13518334Speter 13618334Speter 13718334Speterint OopMapForCacheEntry::size() { 13818334Speter assert(_stack_top != -1, "compute_map must be called first"); 13918334Speter return ((method()->is_static()) ? 
0 : 1) + method()->max_locals() + _stack_top; 14018334Speter} 14118334Speter 14218334Speter 14318334Speter// Implementation of InterpreterOopMap and OopMapCacheEntry 14418334Speter 14518334Speterclass VerifyClosure : public OffsetClosure { 14618334Speter private: 147169689Skan OopMapCacheEntry* _entry; 14818334Speter bool _failed; 149169689Skan 150169689Skan public: 151169689Skan VerifyClosure(OopMapCacheEntry* entry) { _entry = entry; _failed = false; } 15218334Speter void offset_do(int offset) { if (!_entry->is_oop(offset)) _failed = true; } 153169689Skan bool failed() const { return _failed; } 154169689Skan}; 155169689Skan 156169689SkanInterpreterOopMap::InterpreterOopMap() { 157169689Skan initialize(); 158169689Skan#ifdef ASSERT 159169689Skan _resource_allocate_bit_mask = true; 160169689Skan#endif 161169689Skan} 16218334Speter 163169689SkanInterpreterOopMap::~InterpreterOopMap() { 164169689Skan // The expection is that the bit mask was allocated 165169689Skan // last in this resource area. That would make the free of the 166169689Skan // bit_mask effective (see how FREE_RESOURCE_ARRAY does a free). 167169689Skan // If it was not allocated last, there is not a correctness problem 168169689Skan // but the space for the bit_mask is not freed. 
169169689Skan assert(_resource_allocate_bit_mask, "Trying to free C heap space"); 170169689Skan if (mask_size() > small_mask_limit) { 171169689Skan FREE_RESOURCE_ARRAY(uintptr_t, _bit_mask[0], mask_word_size()); 17218334Speter } 17318334Speter} 174169689Skan 17518334Speterbool InterpreterOopMap::is_empty() { 17618334Speter bool result = _method == NULL; 17718334Speter assert(_method != NULL || (_bci == 0 && 178169689Skan (_mask_size == 0 || _mask_size == USHRT_MAX) && 17918334Speter _bit_mask[0] == 0), "Should be completely empty"); 180261188Spfg return result; 181261188Spfg} 182261188Spfg 183261188Spfgvoid InterpreterOopMap::initialize() { 184261188Spfg _method = NULL; 185169689Skan _mask_size = USHRT_MAX; // This value should cause a failure quickly 186169689Skan _bci = 0; 187169689Skan _expression_stack_size = 0; 188169689Skan for (int i = 0; i < N; i++) _bit_mask[i] = 0; 189169689Skan} 190169689Skan 191169689Skan 19218334Spetervoid InterpreterOopMap::oop_iterate(OopClosure *blk) { 193169689Skan if (method() != NULL) { 194169689Skan blk->do_oop((oop*) &_method); 195169689Skan } 196169689Skan} 19718334Speter 198169689Skanvoid InterpreterOopMap::oop_iterate(OopClosure *blk, MemRegion mr) { 199169689Skan if (method() != NULL && mr.contains(&_method)) { 200169689Skan blk->do_oop((oop*) &_method); 201169689Skan } 202169689Skan} 20318334Speter 20418334Speter 20518334Speter 20618334Spetervoid InterpreterOopMap::iterate_oop(OffsetClosure* oop_closure) { 20718334Speter int n = number_of_entries(); 20818334Speter int word_index = 0; 20918334Speter uintptr_t value = 0; 210117395Skan uintptr_t mask = 0; 21118334Speter // iterate over entries 212169689Skan for (int i = 0; i < n; i++, mask <<= bits_per_entry) { 21318334Speter // get current word 21418334Speter if (mask == 0) { 21518334Speter value = bit_mask()[word_index++]; 21650397Sobrien mask = 1; 21750397Sobrien } 21850397Sobrien // test for oop 21918334Speter if ((value & (mask << oop_bit_number)) != 0) 
oop_closure->offset_do(i); 22018334Speter } 22118334Speter} 222169689Skan 22318334Spetervoid InterpreterOopMap::verify() { 22418334Speter // If we are doing mark sweep _method may not have a valid header 22518334Speter // $$$ This used to happen only for m/s collections; we might want to 22618334Speter // think of an appropriate generalization of this distinction. 22718334Speter guarantee(Universe::heap()->is_gc_active() || _method->is_oop_or_null(), 228169689Skan "invalid oop in oopMapCache"); 22918334Speter} 23018334Speter 23118334Speter#ifdef ENABLE_ZAP_DEAD_LOCALS 23218334Speter 23318334Spetervoid InterpreterOopMap::iterate_all(OffsetClosure* oop_closure, OffsetClosure* value_closure, OffsetClosure* dead_closure) { 234169689Skan int n = number_of_entries(); 23518334Speter int word_index = 0; 236169689Skan uintptr_t value = 0; 237169689Skan uintptr_t mask = 0; 238169689Skan // iterate over entries 239169689Skan for (int i = 0; i < n; i++, mask <<= bits_per_entry) { 240169689Skan // get current word 241169689Skan if (mask == 0) { 242169689Skan value = bit_mask()[word_index++]; 24318334Speter mask = 1; 24418334Speter } 24518334Speter // test for dead values & oops, and for live values 24618334Speter if ((value & (mask << dead_bit_number)) != 0) dead_closure->offset_do(i); // call this for all dead values or oops 24718334Speter else if ((value & (mask << oop_bit_number)) != 0) oop_closure->offset_do(i); // call this for all live oops 24818334Speter else value_closure->offset_do(i); // call this for all live values 249169689Skan } 25018334Speter} 251169689Skan 252169689Skan#endif 253169689Skan 254169689Skan 255169689Skanvoid InterpreterOopMap::print() { 256169689Skan int n = number_of_entries(); 257169689Skan tty->print("oop map for "); 25818334Speter method()->print_value(); 25918334Speter tty->print(" @ %d = [%d] { ", bci(), n); 26018334Speter for (int i = 0; i < n; i++) { 26118334Speter#ifdef ENABLE_ZAP_DEAD_LOCALS 262169689Skan if (is_dead(i)) tty->print("%d+ ", 
i); 26318334Speter else 26418334Speter#endif 26518334Speter if (is_oop(i)) tty->print("%d ", i); 26618334Speter } 26718334Speter tty->print_cr("}"); 26818334Speter} 269169689Skan 270169689Skanclass MaskFillerForNative: public NativeSignatureIterator { 271169689Skan private: 272169689Skan uintptr_t * _mask; // the bit mask to be filled 273169689Skan int _size; // the mask size in bits 274169689Skan 275169689Skan void set_one(int i) { 276169689Skan i *= InterpreterOopMap::bits_per_entry; 27718334Speter assert(0 <= i && i < _size, "offset out of bounds"); 278132718Skan _mask[i / BitsPerWord] |= (((uintptr_t) 1 << InterpreterOopMap::oop_bit_number) << (i % BitsPerWord)); 279169689Skan } 28018334Speter 28118334Speter public: 282132718Skan void pass_int() { /* ignore */ } 283169689Skan void pass_long() { /* ignore */ } 28418334Speter#if defined(_LP64) || defined(ZERO) 28596263Sobrien void pass_float() { /* ignore */ } 286169689Skan#endif 28796263Sobrien void pass_double() { /* ignore */ } 288169689Skan void pass_object() { set_one(offset()); } 289169689Skan 29018334Speter MaskFillerForNative(methodHandle method, uintptr_t* mask, int size) : NativeSignatureIterator(method) { 291132718Skan _mask = mask; 292132718Skan _size = size; 293132718Skan // initialize with 0 294132718Skan int i = (size + BitsPerWord - 1) / BitsPerWord; 295132718Skan while (i-- > 0) _mask[i] = 0; 296132718Skan } 297132718Skan 298132718Skan void generate() { 299132718Skan NativeSignatureIterator::iterate(); 300132718Skan } 301132718Skan}; 302132718Skan 30318334Speterbool OopMapCacheEntry::verify_mask(CellTypeState* vars, CellTypeState* stack, int max_locals, int stack_top) { 30418334Speter // Check mask includes map 30518334Speter VerifyClosure blk(this); 30618334Speter iterate_oop(&blk); 30718334Speter if (blk.failed()) return false; 30818334Speter 30918334Speter // Check if map is generated correctly 31018334Speter // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the 
same so we can use == afterwards) 31118334Speter if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals); 31218334Speter 313132718Skan for(int i = 0; i < max_locals; i++) { 314132718Skan bool v1 = is_oop(i) ? true : false; 315132718Skan bool v2 = vars[i].is_reference() ? true : false; 31618334Speter assert(v1 == v2, "locals oop mask generation error"); 31718334Speter if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0); 31818334Speter#ifdef ENABLE_ZAP_DEAD_LOCALS 31918334Speter bool v3 = is_dead(i) ? true : false; 32018334Speter bool v4 = !vars[i].is_live() ? true : false; 32118334Speter assert(v3 == v4, "locals live mask generation error"); 32218334Speter assert(!(v1 && v3), "dead value marked as oop"); 32318334Speter#endif 32418334Speter } 32518334Speter 32618334Speter if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); } 32718334Speter for(int j = 0; j < stack_top; j++) { 32818334Speter bool v1 = is_oop(max_locals + j) ? true : false; 32918334Speter bool v2 = stack[j].is_reference() ? true : false; 330169689Skan assert(v1 == v2, "stack oop mask generation error"); 331169689Skan if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0); 33218334Speter#ifdef ENABLE_ZAP_DEAD_LOCALS 33352284Sobrien bool v3 = is_dead(max_locals + j) ? true : false; 33452284Sobrien bool v4 = !stack[j].is_live() ? 
true : false; 33552284Sobrien assert(v3 == v4, "stack live mask generation error"); 33652284Sobrien assert(!(v1 && v3), "dead value marked as oop"); 33718334Speter#endif 33818334Speter } 33918334Speter if (TraceOopMapGeneration && Verbose) tty->cr(); 34018334Speter return true; 34118334Speter} 34218334Speter 34318334Spetervoid OopMapCacheEntry::allocate_bit_mask() { 34418334Speter if (mask_size() > small_mask_limit) { 345117395Skan assert(_bit_mask[0] == 0, "bit mask should be new or just flushed"); 34618334Speter _bit_mask[0] = (intptr_t) 34718334Speter NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size()); 34818334Speter } 349169689Skan} 350169689Skan 351169689Skanvoid OopMapCacheEntry::deallocate_bit_mask() { 352169689Skan if (mask_size() > small_mask_limit && _bit_mask[0] != 0) { 353169689Skan assert(!Thread::current()->resource_area()->contains((void*)_bit_mask[0]), 354169689Skan "This bit mask should not be in the resource area"); 355169689Skan FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]); 356169689Skan debug_only(_bit_mask[0] = 0;) 357169689Skan } 358169689Skan} 359169689Skan 360169689Skan 36150397Sobrienvoid OopMapCacheEntry::fill_for_native(methodHandle mh) { 362169689Skan assert(mh->is_native(), "method must be native method"); 363169689Skan set_mask_size(mh->size_of_parameters() * bits_per_entry); 364169689Skan allocate_bit_mask(); 365169689Skan // fill mask for parameters 366169689Skan MaskFillerForNative mf(mh, bit_mask(), mask_size()); 367169689Skan mf.generate(); 36850397Sobrien} 36950397Sobrien 370169689Skan 371132718Skanvoid OopMapCacheEntry::fill(methodHandle method, int bci) { 372132718Skan HandleMark hm; 373132718Skan // Flush entry to deallocate an existing entry 374169689Skan flush(); 375169689Skan set_method(method()); 37618334Speter set_bci(bci); 37718334Speter if (method->is_native()) { 37818334Speter // Native method activations have oops only among the parameters and one 37918334Speter // extra oop following the parameters (the mirror for static 
native methods). 380169689Skan fill_for_native(method); 381169689Skan } else { 382169689Skan EXCEPTION_MARK; 383169689Skan OopMapForCacheEntry gen(method, bci, this); 384169689Skan gen.compute_map(CATCH); 38518334Speter } 38618334Speter #ifdef ASSERT 38718334Speter verify(); 38818334Speter #endif 38918334Speter} 39018334Speter 39118334Speter 392169689Skanvoid OopMapCacheEntry::set_mask(CellTypeState *vars, CellTypeState *stack, int stack_top) { 393169689Skan // compute bit mask size 394169689Skan int max_locals = method()->max_locals(); 395169689Skan int n_entries = max_locals + stack_top; 396169689Skan set_mask_size(n_entries * bits_per_entry); 397169689Skan allocate_bit_mask(); 398169689Skan set_expression_stack_size(stack_top); 39918334Speter 400169689Skan // compute bits 40118334Speter int word_index = 0; 402169689Skan uintptr_t value = 0; 403169689Skan uintptr_t mask = 1; 404169689Skan 40518334Speter CellTypeState* cell = vars; 406169689Skan for (int entry_index = 0; entry_index < n_entries; entry_index++, mask <<= bits_per_entry, cell++) { 407169689Skan // store last word 408169689Skan if (mask == 0) { 409169689Skan bit_mask()[word_index++] = value; 410169689Skan value = 0; 411169689Skan mask = 1; 412169689Skan } 41390075Sobrien 414169689Skan // switch to stack when done with locals 415169689Skan if (entry_index == max_locals) { 416169689Skan cell = stack; 417169689Skan } 418169689Skan 41918334Speter // set oop bit 42090075Sobrien if ( cell->is_reference()) { 42190075Sobrien value |= (mask << oop_bit_number ); 42290075Sobrien } 423169689Skan 42490075Sobrien #ifdef ENABLE_ZAP_DEAD_LOCALS 425169689Skan // set dead bit 426169689Skan if (!cell->is_live()) { 427169689Skan value |= (mask << dead_bit_number); 428169689Skan assert(!cell->is_reference(), "dead value marked as oop"); 429169689Skan } 430169689Skan #endif 431169689Skan } 432169689Skan 433169689Skan // make sure last word is stored 434169689Skan bit_mask()[word_index] = value; 435169689Skan 
43690075Sobrien // verify bit mask 437169689Skan assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified"); 438169689Skan 439169689Skan 440169689Skan} 441169689Skan 442169689Skanvoid OopMapCacheEntry::flush() { 44318334Speter deallocate_bit_mask(); 44418334Speter initialize(); 445169689Skan} 446169689Skan 44718334Speter 44850397Sobrien// Implementation of OopMapCache 449169689Skan 450169689Skan#ifndef PRODUCT 451169689Skan 45250397Sobrienstatic long _total_memory_usage = 0; 45350397Sobrien 45418334Speterlong OopMapCache::memory_usage() { 45550397Sobrien return _total_memory_usage; 456169689Skan} 457169689Skan 45850397Sobrien#endif 45950397Sobrien 46050397Sobrienvoid InterpreterOopMap::resource_copy(OopMapCacheEntry* from) { 46150397Sobrien assert(_resource_allocate_bit_mask, 46250397Sobrien "Should not resource allocate the _bit_mask"); 46350397Sobrien assert(from->method()->is_oop(), "MethodOop is bad"); 46418334Speter 46550397Sobrien set_method(from->method()); 46650397Sobrien set_bci(from->bci()); 467169689Skan set_mask_size(from->mask_size()); 46818334Speter set_expression_stack_size(from->expression_stack_size()); 46918334Speter 470169689Skan // Is the bit mask contained in the entry? 47118334Speter if (from->mask_size() <= small_mask_limit) { 47218334Speter memcpy((void *)_bit_mask, (void *)from->_bit_mask, 473169689Skan mask_word_size() * BytesPerWord); 474169689Skan } else { 475169689Skan // The expectation is that this InterpreterOopMap is a recently created 47618334Speter // and empty. It is used to get a copy of a cached entry. 47718334Speter // If the bit mask has a value, it should be in the 478169689Skan // resource area. 479169689Skan assert(_bit_mask[0] == 0 || 480169689Skan Thread::current()->resource_area()->contains((void*)_bit_mask[0]), 481169689Skan "The bit mask should have been allocated from a resource area"); 482169689Skan // Allocate the bit_mask from a Resource area for performance. 
Allocating 483169689Skan // from the C heap as is done for OopMapCache has a significant 48418334Speter // performance impact. 48518334Speter _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size()); 48618334Speter assert(_bit_mask[0] != 0, "bit mask was not allocated"); 48718334Speter memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0], 48850397Sobrien mask_word_size() * BytesPerWord); 48990075Sobrien } 49090075Sobrien} 49190075Sobrien 49290075Sobrieninline unsigned int OopMapCache::hash_value_for(methodHandle method, int bci) { 493169689Skan // We use method->code_size() rather than method->identity_hash() below since 49418334Speter // the mark may not be present if a pointer to the method is already reversed. 495169689Skan return ((unsigned int) bci) 496169689Skan ^ ((unsigned int) method->max_locals() << 2) 497169689Skan ^ ((unsigned int) method->code_size() << 4) 498169689Skan ^ ((unsigned int) method->size_of_parameters() << 6); 499169689Skan} 500169689Skan 501169689Skan 502169689SkanOopMapCache::OopMapCache() : 503169689Skan _mut(Mutex::leaf, "An OopMapCache lock", true) 504169689Skan{ 505169689Skan _array = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size); 506169689Skan // Cannot call flush for initialization, since flush 50718334Speter // will check if memory should be deallocated 508169689Skan for(int i = 0; i < _size; i++) _array[i].initialize(); 509169689Skan NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);) 51018334Speter} 511169689Skan 51218334Speter 51318334SpeterOopMapCache::~OopMapCache() { 51418334Speter assert(_array != NULL, "sanity check"); 51518334Speter // Deallocate oop maps that are allocated out-of-line 51618334Speter flush(); 517169689Skan // Deallocate array 51818334Speter NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);) 51918334Speter FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array); 52018334Speter} 52118334Speter 
522169689SkanOopMapCacheEntry* OopMapCache::entry_at(int i) const { 52318334Speter return &_array[i % _size]; 52418334Speter} 52518334Speter 52618334Spetervoid OopMapCache::flush() { 52718334Speter for (int i = 0; i < _size; i++) _array[i].flush(); 528169689Skan} 52918334Speter 53018334Spetervoid OopMapCache::flush_obsolete_entries() { 53118334Speter for (int i = 0; i < _size; i++) 532169689Skan if (!_array[i].is_empty() && _array[i].method()->is_old()) { 533169689Skan // Cache entry is occupied by an old redefined method and we don't want 534169689Skan // to pin it down so flush the entry. 53518334Speter RC_TRACE(0x08000000, ("flush: %s(%s): cached entry @%d", 53618334Speter _array[i].method()->name()->as_C_string(), 537169689Skan _array[i].method()->signature()->as_C_string(), i)); 538169689Skan 539169689Skan _array[i].flush(); 540169689Skan } 541169689Skan} 54218334Speter 54318334Spetervoid OopMapCache::oop_iterate(OopClosure *blk) { 54418334Speter for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk); 54518334Speter} 54618334Speter 54718334Spetervoid OopMapCache::oop_iterate(OopClosure *blk, MemRegion mr) { 54818334Speter for (int i = 0; i < _size; i++) _array[i].oop_iterate(blk, mr); 54918334Speter} 55018334Speter 55118334Spetervoid OopMapCache::verify() { 55218334Speter for (int i = 0; i < _size; i++) _array[i].verify(); 55318334Speter} 55418334Speter 55518334Spetervoid OopMapCache::lookup(methodHandle method, 55618334Speter int bci, 55718334Speter InterpreterOopMap* entry_for) { 558169689Skan MutexLocker x(&_mut); 55918334Speter 56018334Speter OopMapCacheEntry* entry = NULL; 56190075Sobrien int probe = hash_value_for(method, bci); 56290075Sobrien 56390075Sobrien // Search hashtable for match 564132718Skan int i; 565132718Skan for(i = 0; i < _probe_depth; i++) { 56618334Speter entry = entry_at(probe + i); 56790075Sobrien if (entry->match(method, bci)) { 56890075Sobrien entry_for->resource_copy(entry); 56990075Sobrien assert(!entry_for->is_empty(), "A 
non-empty oop map should be returned"); 57090075Sobrien return; 57190075Sobrien } 57290075Sobrien } 57390075Sobrien 57490075Sobrien if (TraceOopMapGeneration) { 57518334Speter static int count = 0; 57690075Sobrien ResourceMark rm; 57790075Sobrien tty->print("%d - Computing oopmap at bci %d for ", ++count, bci); 57890075Sobrien method->print_value(); tty->cr(); 57918334Speter } 58090075Sobrien 58190075Sobrien // Entry is not in hashtable. 58290075Sobrien // Compute entry and return it 58390075Sobrien 58490075Sobrien if (method->should_not_be_cached()) { 58518334Speter // It is either not safe or not a good idea to cache this methodOop 58690075Sobrien // at this time. We give the caller of lookup() a copy of the 58790075Sobrien // interesting info via parameter entry_for, but we don't add it to 58890075Sobrien // the cache. See the gory details in methodOop.cpp. 589132718Skan compute_one_oop_map(method, bci, entry_for); 590132718Skan return; 591132718Skan } 592132718Skan 593132718Skan // First search for an empty slot 594132718Skan for(i = 0; i < _probe_depth; i++) { 595132718Skan entry = entry_at(probe + i); 596169689Skan if (entry->is_empty()) { 597169689Skan entry->fill(method, bci); 598169689Skan entry_for->resource_copy(entry); 599169689Skan assert(!entry_for->is_empty(), "A non-empty oop map should be returned"); 600169689Skan return; 601169689Skan } 60218334Speter } 60390075Sobrien 60490075Sobrien if (TraceOopMapGeneration) { 60590075Sobrien ResourceMark rm; 606169689Skan tty->print_cr("*** collision in oopmap cache - flushing item ***"); 60790075Sobrien } 608169689Skan 609169689Skan // No empty slot (uncommon case). 
Use (some approximation of a) LRU algorithm 610169689Skan //entry_at(probe + _probe_depth - 1)->flush(); 61118334Speter //for(i = _probe_depth - 1; i > 0; i--) { 61250397Sobrien // // Coping entry[i] = entry[i-1]; 613169689Skan // OopMapCacheEntry *to = entry_at(probe + i); 614169689Skan // OopMapCacheEntry *from = entry_at(probe + i - 1); 615169689Skan // to->copy(from); 61618334Speter // } 61718334Speter 618169689Skan assert(method->is_method(), "gaga"); 61918334Speter 62018334Speter entry = entry_at(probe + 0); 621169689Skan entry->fill(method, bci); 62218334Speter 62318334Speter // Copy the newly cached entry to input parameter 624169689Skan entry_for->resource_copy(entry); 62518334Speter 62618334Speter if (TraceOopMapGeneration) { 627169689Skan ResourceMark rm; 62818334Speter tty->print("Done with "); 62918334Speter method->print_value(); tty->cr(); 630169689Skan } 631169689Skan assert(!entry_for->is_empty(), "A non-empty oop map should be returned"); 632169689Skan 633169689Skan return; 63418334Speter} 63550397Sobrien 636169689Skanvoid OopMapCache::compute_one_oop_map(methodHandle method, int bci, InterpreterOopMap* entry) { 63718334Speter // Due to the invariants above it's tricky to allocate a temporary OopMapCacheEntry on the stack 63818334Speter OopMapCacheEntry* tmp = NEW_C_HEAP_ARRAY(OopMapCacheEntry, 1); 63918334Speter tmp->initialize(); 640169689Skan tmp->fill(method, bci); 64118334Speter entry->resource_copy(tmp); 64218334Speter FREE_C_HEAP_ARRAY(OopMapCacheEntry, tmp); 64318334Speter} 64418334Speter