stubRoutines_sparc.hpp revision 0:a61af66fc99e
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// This file holds the platform specific parts of the StubRoutines
// definition.  See stubRoutines.hpp for a description on how to
// extend it.


// Unfortunately C2 may pass in a pc taken from a frame object (already
// adjusted by frame::pc_return_offset) or a raw, unadjusted pc, so
// returns_to_call_stub() below has to check both forms against the call
// stub's return address.  This asymmetry did not exist before adapter
// removal.
// Answers whether return_pc will land back in the VM's call stub.
//
// Both the adjusted form (return_pc + frame::pc_return_offset) and the raw
// form are compared against _call_stub_return_address (declared in the
// shared StubRoutines code), because callers pass in either one — see the
// note at the top of this file.
static bool returns_to_call_stub(address return_pc) {
  return ((return_pc + frame::pc_return_offset) == _call_stub_return_address) ||
         (return_pc == _call_stub_return_address );
}

// Capacities of the two stub-code buffers.  Per the comments below, the
// assembler crashes if these are too small; the remedy is simply to
// increase them.
enum /* platform_dependent_constants */ {
  // %%%%%%%% May be able to shrink this a lot
  code_size1 = 20000, // simply increase if too small (assembler will crash if too small)
  code_size2 = 20000  // simply increase if too small (assembler will crash if too small)
};

// SPARC-specific StubRoutines state: entry points into generated stub code
// plus a few data items shared with that code.  StubGenerator is a friend,
// presumably so it can initialize the private entry-point fields below as
// it generates the stubs — confirm against stubGenerator code.
class Sparc {
 friend class StubGenerator;

 public:
  // NOTE(review): consumers of this count are not visible in this file.
  enum { nof_instance_allocators = 10 };

  // allocator lock values
  enum {
    unlocked = 0,
    locked = 1
  };

  // Geometry of the V8 oop-lock cache below:
  //  - the low v8_oop_lock_ignore_bits of an oop's address are ignored,
  //  - the next v8_oop_lock_bits select one of the cache entries,
  //  - v8_oop_lock_mask / v8_oop_lock_mask_in_place are that selector's
  //    mask, unshifted and shifted into place.
  // right_n_bits() is a project macro (defined elsewhere) that presumably
  // yields a mask of the low n bits — verify against its definition.
  enum {
    v8_oop_lock_ignore_bits = 2,
    v8_oop_lock_bits = 4,
    nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits),
    v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits),
    v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits
  };

  // Lock-word cache indexed by the address bits described above
  // (nof_v8_oop_lock_cache_entries entries).  NOTE(review): exact use is in
  // the generated stub code, not visible here.
  static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries];

 private:
  // Entry points into generated stub code; exposed through the typed
  // accessors in the public section below.
  static address _test_stop_entry;
  static address _stop_subroutine_entry;
  static address _flush_callers_register_windows_entry;

  // Storage for the global lock described by the %%% comment below.
  static int _atomic_memory_operation_lock;

  static address _partial_subtype_check;

 public:
  // %%% global lock for everyone who needs to use atomic_compare_and_exchange
  // %%% or atomic_increment -- should probably use more locks for more
  // %%% scalability-- for instance one for each eden space or group of

  // address of the lock for atomic_compare_and_exchange
  static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; }

  // accessor and mutator for _atomic_memory_operation_lock
  static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
  static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }

  // test assembler stop routine by setting registers
  // (returns _test_stop_entry cast to a no-arg function pointer)
  static void (*test_stop_entry()) () { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); }

  // a subroutine for debugging assembler code
  // (note: returns the address OF the entry-point variable, not its value)
  static address stop_subroutine_entry_address() { return (address)&_stop_subroutine_entry; }

  // flushes (all but current) register window
  // (returns _flush_callers_register_windows_entry cast to a function
  // pointer yielding an intptr_t*)
  static intptr_t* (*flush_callers_register_windows_func())() { return CAST_TO_FN_PTR(intptr_t* (*)(void), _flush_callers_register_windows_entry); }

  static address partial_subtype_check() { return _partial_subtype_check; }
};