sharedRuntime_sparc.cpp revision 356:1ee8caae33af
/*
 * Copyright 2003-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime_sparc.cpp.incl"

#define __ masm->

#ifdef COMPILER2
UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub*        SharedRuntime::_wrong_method_blob;
RuntimeStub*        SharedRuntime::_ic_miss_blob;
RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32-bit build the compiler can
  // have O registers live with 64-bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a
  // call, or if we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save. For all builds, a window save will preserve
  // the %i and %l registers. For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers. In the LION build
  // we need to save the 64-bit %o registers, which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt). We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area. Includes extra space for the native call:
    // vararg's layout space and the like. Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // Can't use round_to because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8, // Start of float save area
    register_save_size = d00_offset+8*32
  };


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};
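// Illustrative layout sketch (not part of the original source): with 8-byte
// slots the extra save area is packed contiguously. If, for example,
// start_of_extra_save_area came out to 0x60, the slots would fall at
//   g1 0x60, g3 0x68, g4 0x70, g5 0x78,
//   o0 0x80 .. o5 0xa8, ccr 0xb0, fsr 0xb8, d00 0xc0,
// and register_save_size would be 0xc0 + 8*32 = 0x1c0 bytes, the extra
// 256 bytes covering the 32 double registers saved from d00_offset on.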
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value). The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16 byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs. Although they are now Iregs we load them
  // to Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers
  int offset = d00_offset;
  for( int i=0; i<64; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D, f, SP, offset+STACK_BIAS);
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    if (true) {
      map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    }
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<64; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr (G1) ;

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // 32-bit build returns longs in G1
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}
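// Illustrative use (a sketch, not original code): runtime blobs typically
// bracket a call into the VM with the save/restore pair above, e.g.
//
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
//   // ... set last_Java_frame, call the VM entry, reset it ...
//   RegisterSaver::restore_live_registers(masm);
//
// so that the returned OopMap can tell a stack walker (GC, deoptimization)
// where every volatile register was spilled while the VM call ran.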
// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go. Values in the VMRegPair regs array refer to 4-byte
// (VMRegImpl::stack_slot_size) quantities. Values less than VMRegImpl::stack0
// are registers, those above refer to 4-byte stack slots. All stack slots are
// based off of the window top. VMRegImpl::stack0 refers to the first slot
// past the 16-word window, and VMRegImpl::stack0+1 refers to the memory word
// 4 bytes higher. Register values 0-63 (up to RegisterImpl::number_of_registers)
// are the 64-bit integer registers. Values 64-95 are the (32-bit only) float
// registers. Each 32-bit quantity is given its own number, so the integer
// registers (in either 32- or 64-bit builds) use 2 numbers. For example, there
// is an O0-low and an O0-high. Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments. To
// convert to incoming arguments, convert all O's to I's. The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed). If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build). regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build. The OUTPUTS are in 32-bit
// units regardless of build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention. The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs. Values are
// packed in the registers. There is no backing varargs store for values in
// registers. In the 32-bit build, longs are passed in G1 and G4 (they cannot
// be passed in I's, because longs in I's get their heads chopped off at
// interrupt).
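// A worked example of the convention just described (illustrative, assuming
// the 64-bit COMPILER2 build): for a signature (int, double, Object, long)
// the outgoing arguments land as
//   int    -> O0     (first of the six int/oop registers)
//   double -> F0:F1  (first aligned float pair)
//   Object -> O1     (full 64-bit oop in the next int/oop register)
//   long   -> O2     (on LP64 longs compete with the int args)
// with no stack slots needed.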
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  // Convention is to pack the first 6 int/oop args into the first 6 registers
  // (I0-I5), extras spill to the stack. Then pack the first 8 float args
  // into F0-F7, extras spill to the stack. Then pad all register sets to
  // align. Then put longs and doubles into the same registers as they fit,
  // else spill to the stack.
  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;
  //
  // Where 32-bit 1-reg longs start being passed
  // In tiered we must pass on stack because c1 can't use a "pair" in a single reg.
  // So make it look like we've filled all the G regs that c2 wants to use.
  Register g_reg = TieredCompilation ? noreg : G1;

  // Count int/oop and float args. See how many stack slots we'll need and
  // where the longs & doubles will go.
  int int_reg_cnt = 0;
  int flt_reg_cnt = 0;
  // int stk_reg_pairs = frame::register_save_words*(wordSize>>2);
  // int stk_reg_pairs = SharedRuntime::out_preserve_stack_slots();
  int stk_reg_pairs = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_LONG:                // LP64, longs compete with int args
      assert(sig_bt[i+1] == T_VOID, "");
#ifdef _LP64
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#endif
      break;
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
#ifndef _LP64
      else                           stk_reg_pairs++;
#endif
      break;
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg_cnt < int_reg_max) int_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_FLOAT:
      if (flt_reg_cnt < flt_reg_max) flt_reg_cnt++;
      else                           stk_reg_pairs++;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "");
      break;
    case T_VOID:
      break;
    default:
      ShouldNotReachHere();
    }
  }

  // This is where the longs/doubles start on the stack.
  stk_reg_pairs = (stk_reg_pairs+1) & ~1; // Round

  int int_reg_pairs = (int_reg_cnt+1) & ~1; // 32-bit 2-reg longs only
  int flt_reg_pairs = (flt_reg_cnt+1) & ~1;

  // int stk_reg = frame::register_save_words*(wordSize>>2);
  // int stk_reg = SharedRuntime::out_preserve_stack_slots();
  int stk_reg = 0;
  int int_reg = 0;
  int flt_reg = 0;

  // Now do the signature layout
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      }
      break;

#ifdef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
#endif // _LP64

    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
#ifdef COMPILER2
#ifdef _LP64
      // Can't be tiered (yet)
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#else
      // For the 32-bit build, we can't pass longs in O-regs because they
      // become I-regs and get trashed. Use G-regs instead. G1 and G4 are
      // almost spare and available. This convention isn't used by the Sparc
      // ABI or anywhere else. If we're tiered then we don't use G-regs
      // because c1 can't deal with them as a "pair".
      // G0: zero
      // G1: 1st Long arg
      // G2: global allocated to TLS
      // G3: used in inline cache check
      // G4: 2nd Long arg
      // G5: used in inline cache check
      // G6: used by OS
      // G7: used by OS

      if (g_reg == G1) {
        regs[i].set2(G1->as_VMReg()); // This long arg in G1
        g_reg = G4;                   // Where the next arg goes
      } else if (g_reg == G4) {
        regs[i].set2(G4->as_VMReg()); // The 2nd long arg in G4
        g_reg = noreg;                // No more longs in registers
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#endif // _LP64
#else // COMPILER2
      if (int_reg_pairs + 1 < int_reg_max) {
        if (is_outgoing) {
          regs[i].set_pair(as_oRegister(int_reg_pairs + 1)->as_VMReg(), as_oRegister(int_reg_pairs)->as_VMReg());
        } else {
          regs[i].set_pair(as_iRegister(int_reg_pairs + 1)->as_VMReg(), as_iRegister(int_reg_pairs)->as_VMReg());
        }
        int_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
#endif // COMPILER2
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) regs[i].set1(as_FloatRegister(flt_reg++)->as_VMReg());
      else                       regs[i].set1(VMRegImpl::stack2reg(stk_reg++));
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (flt_reg_pairs + 1 < flt_reg_max) {
        regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
        flt_reg_pairs += 2;
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
        stk_reg_pairs += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break; // Halves of longs & doubles
    default:
      ShouldNotReachHere();
    }
  }

  // Return the amount of stack space these arguments will need.
  return stk_reg_pairs;

}
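// Worked example of the 32-bit COMPILER2 long convention above (an assumed
// signature, for illustration): static long mul(long a, long b) receives
// a in G1 and b in G4; a third long argument would fall through to an
// aligned stack pair, since only those two G registers are available.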
// Helper class mostly to avoid passing masm everywhere, and handle store
// displacement overflow logic for LP64
class AdapterGenerator {
  MacroAssembler *masm;
#ifdef _LP64
  Register Rdisp;
  void set_Rdisp(Register r) { Rdisp = r; }
#endif // _LP64

  void patch_callers_callsite();
  void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
  }

#ifdef _LP64
  // On _LP64 argument slot values are loaded first into a register
  // because they might not fit into the displacement.
  Register arg_slot(const int st_off);
  Register next_arg_slot(const int st_off);
#else
  int arg_slot(const int st_off)      { return arg_offset(st_off); }
  int next_arg_slot(const int st_off) { return next_arg_offset(st_off); }
#endif // _LP64

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs,
                       Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, __ pt, L);
  // Schedule the branch target address early.
  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  // G1: 1st Long arg (32-bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32-bit build);
  // G5: used in inline cache check (methodOop)

  // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  Address dest(O7, CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}

void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
                                   Register scratch) {
  if (TaggedStackInterpreter) {
    int tag_off = st_off + Interpreter::tag_offset_in_bytes();
#ifdef _LP64
    Register tag_slot = Rdisp;
    __ set(tag_off, tag_slot);
#else
    int tag_slot = tag_off;
#endif // _LP64
    // have to store zero because local slots can be reused (rats!)
    if (t == frame::TagValue) {
      __ st_ptr(G0, base, tag_slot);
    } else if (t == frame::TagCategory2) {
      __ st_ptr(G0, base, tag_slot);
      int next_tag_off = st_off - Interpreter::stackElementSize() +
                         Interpreter::tag_offset_in_bytes();
#ifdef _LP64
      __ set(next_tag_off, tag_slot);
#else
      tag_slot = next_tag_off;
#endif // _LP64
      __ st_ptr(G0, base, tag_slot);
    } else {
      __ mov(t, scratch);
      __ st_ptr(scratch, base, tag_slot);
    }
  }
}

#ifdef _LP64
Register AdapterGenerator::arg_slot(const int st_off) {
  __ set( arg_offset(st_off), Rdisp);
  return Rdisp;
}

Register AdapterGenerator::next_arg_slot(const int st_off){
  __ set( next_arg_offset(st_off), Rdisp);
  return Rdisp;
}
#endif // _LP64
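// Why the indirection above (explanatory note, not in the original source):
// SPARC load/store immediates are a signed 13-bit field (simm13, the range
// -4096..4095). On LP64 the argument offsets include STACK_BIAS (2047) and
// grow with the argument count, so the displacement can overflow simm13;
// materializing the offset into Rdisp with set() and using the
// register-indexed form of the load/store avoids that.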
// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));       // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));       // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
  tag_c2i_arg(frame::TagCategory2, base, st_off, r);
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                                        const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagReference, base, st_off, r);
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                                     const int st_off) {
  __ st (r, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, r);
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                                        VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off)     );
#endif
  tag_c2i_arg(frame::TagCategory2, base, st_off, G1_scratch);
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
  tag_c2i_arg(frame::TagValue, base, st_off, G1_scratch);
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all. We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one). Check for a
  // compiled target. If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need. Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize();
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize();

  Register base = SP;

#ifdef _LP64
  // In the 64-bit build, because of wider slots and STACKBIAS we can run
  // out of bits in the displacement to do loads and stores. Use g3 as a
  // temporary displacement.
  if (!__ is_simm13(extraspace)) {
    __ set(extraspace, G3_scratch);
    __ sub(SP, G3_scratch, SP);
  } else {
    __ sub(SP, extraspace, SP);
  }
  set_Rdisp(G3_scratch);
#else
  __ sub(SP, extraspace, SP);
#endif // _LP64

  // First write G1 (if used) to wherever it must go
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (r_1 == G1_scratch->as_VMReg()) {
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(G1_scratch, base, st_off);
      } else if (sig_bt[i] == T_LONG) {
        assert(!TieredCompilation, "should not use register args for longs");
        store_c2i_long(G1_scratch, base, st_off, false);
      } else {
        store_c2i_int(G1_scratch, base, st_off);
      }
    }
  }
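  // Explanatory note (not in the original source): G1 is written out first
  // because the loop below stages every stack-sourced argument through
  // G1_scratch; flushing G1's own argument now frees the register for that
  // shuffle.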
  // Now write the args into the outgoing interpreter space
  for (int i=0; i<total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize()) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    // Skip G1 if found as we did it first in order to free it up
    if (r_1 == G1_scratch->as_VMReg()) {
      continue;
    }
#ifdef ASSERT
    bool G1_forced = false;
#endif // ASSERT
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
#ifdef _LP64
      Register ld_off = Rdisp;
      __ set(reg2offset(r_1) + extraspace + bias, ld_off);
#else
      int ld_off = reg2offset(r_1) + extraspace + bias;
#ifdef ASSERT
      G1_forced = true;
#endif // ASSERT
#endif // _LP64
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        if (TieredCompilation) {
          assert(G1_forced || sig_bt[i] != T_LONG, "should not use register args for longs");
        }
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

#ifdef _LP64
  // Need to reload G3_scratch, used for temporary displacements.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ set(extraspace, G1);
  __ add(SP, G1, O5_savedSP);
#else
  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, extraspace, O5_savedSP);
#endif // _LP64

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize()+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if the interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Set up Lesp for the call. Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp. However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in. Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout. Lesp was saved by the calling I-frame and will be restored on
  // return. Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will. After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention. Finally, end in a jump to the compiled code. The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // O0             - Flag telling us to restore SP from O5
  // O4_args        - Pointer to interpreter's args
  // O5             - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // G1, G4         - Outgoing long args in 32-bit build
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // O4 is about to get loaded up with the compiled callee's args
  __ sub(Gargs, BytesPerWord, Gargs);

#ifdef ASSERT
  {
    // on entry O5_savedSP and SP should be equal
    Label ok;
    __ cmp(O5_savedSP, SP);
    __ br(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("I5_savedSP not set");
    __ should_not_reach_here();
    __ bind(ok);
  }
#endif

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // | receiver     |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP
We may have to resize the stack slightly, in case 999 // we need alignment padding (32-bit interpreter can pass longs & doubles 1000 // misaligned, but the compilers expect them aligned). 1001 // 1002 // | | 1003 // : java stack : 1004 // | | 1005 // +--------------+ <--- start of outgoing args 1006 // | pad, align | | 1007 // +--------------+ | 1008 // | ints, floats | |---Outgoing stack args, packed low. 1009 // +--------------+ | First few args in registers. 1010 // : doubles : | 1011 // | longs | | 1012 // +--------------+ <--- SP' + 16*wordsize 1013 // | | 1014 // : window : 1015 // | | 1016 // +--------------+ <--- SP' 1017 1018 // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME 1019 // WITH O7 HOLDING A VALID RETURN PC - ITS JUST THAT THE ARGS ARE NOW SETUP 1020 // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN. 1021 1022 // Cut-out for having no stack args. Since up to 6 args are passed 1023 // in registers, we will commonly have no stack args. 1024 if (comp_args_on_stack > 0) { 1025 1026 // Convert VMReg stack slots to words. 1027 int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord; 1028 // Round up to miminum stack alignment, in wordSize 1029 comp_words_on_stack = round_to(comp_words_on_stack, 2); 1030 // Now compute the distance from Lesp to SP. This calculation does not 1031 // include the space for total_args_passed because Lesp has not yet popped 1032 // the arguments. 1033 __ sub(SP, (comp_words_on_stack)*wordSize, SP); 1034 } 1035 1036 // Will jump to the compiled code just as if compiled code was doing it. 1037 // Pre-load the register-jump target early, to schedule it better. 1038 __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3); 1039 1040 // Now generate the shuffle code. Pick up all register args and move the 1041 // rest through G1_scratch. 1042 for (int i=0; i<total_args_passed; i++) { 1043 if (sig_bt[i] == T_VOID) { 1044 // Longs and doubles are passed in native word order, but misaligned 1045 // in the 32-bit build. 1046 assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half"); 1047 continue; 1048 } 1049 1050 // Pick up 0, 1 or 2 words from Lesp+offset. Assume mis-aligned in the 1051 // 32-bit build and aligned in the 64-bit build. Look for the obvious 1052 // ldx/lddf optimizations. 1053 1054 // Load in argument order going down. 1055 const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize(); 1056#ifdef _LP64 1057 set_Rdisp(G1_scratch); 1058#endif // _LP64 1059 1060 VMReg r_1 = regs[i].first(); 1061 VMReg r_2 = regs[i].second(); 1062 if (!r_1->is_valid()) { 1063 assert(!r_2->is_valid(), ""); 1064 continue; 1065 } 1066 if (r_1->is_stack()) { // Pretend stack targets are loaded into F8/F9 1067 r_1 = F8->as_VMReg(); // as part of the load/store shuffle 1068 if (r_2->is_valid()) r_2 = r_1->next(); 1069 } 1070 if (r_1->is_Register()) { // Register argument 1071 Register r = r_1->as_Register()->after_restore(); 1072 if (!r_2->is_valid()) { 1073 __ ld(Gargs, arg_slot(ld_off), r); 1074 } else { 1075#ifdef _LP64 1076 // In V9, longs are given 2 64-bit slots in the interpreter, but the 1077 // data is passed in only 1 slot. 1078 Register slot = (sig_bt[i]==T_LONG) ? 1079 next_arg_slot(ld_off) : arg_slot(ld_off); 1080 __ ldx(Gargs, slot, r); 1081#else 1082 // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the 1083 // stack shuffle. Load the first 2 longs into G1/G4 later. 
  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);

  // Now generate the shuffle code. Pick up all register args and move the
  // rest through G1_scratch.
  for (int i=0; i<total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset. Assume misaligned in the
    // 32-bit build and aligned in the 64-bit build. Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
#ifdef _LP64
    set_Rdisp(G1_scratch);
#endif // _LP64

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();       // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {     // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        Register slot = (sig_bt[i]==T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
        // stack shuffle. Load the first 2 longs into G1/G4 later.
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot. This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        Register slot = (sig_bt[i]==T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word. Target address _is_ aligned.
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, st_off);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, st_off);
    }
  }
  bool made_space = false;
#ifndef _LP64
  // May need to pick up a few long args in G1/G4
  bool g4_crushed = false;
  bool g3_crushed = false;
  for (int i=0; i<total_args_passed; i++) {
    if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
      // Load in argument order going down
      int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
      // Need to marshal 64-bit value from misaligned Lesp loads
      Register r = regs[i].first()->as_Register()->after_restore();
      if (r == G1 || r == G4) {
        assert(!g4_crushed, "ordering problem");
        if (r == G4){
          g4_crushed = true;
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
        } else {
          // better schedule this way
          __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
          __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
        }
        g3_crushed = true;
        __ sllx(r, 32, r);
        __ or3(G3_scratch, r, r);
      } else {
        assert(r->is_out(), "longs passed in two O registers");
        __ ld  (Gargs, arg_slot(ld_off)     , r->successor()); // Load lo bits
        __ ld  (Gargs, next_arg_slot(ld_off), r);              // Load hi bits
      }
    }
  }
#endif

  // Jump to the compiled code just as if compiled code was doing it.
  //
#ifndef _LP64
  if (g3_crushed) {
    // Rats, the load was wasted; at least it is in cache...
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
  }
#endif /* _LP64 */

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible.
So we stash the desired callee in the thread 1168 // and the vm will find there should this case occur. 1169 Address callee_target_addr(G2_thread, 0, in_bytes(JavaThread::callee_target_offset())); 1170 __ st_ptr(G5_method, callee_target_addr); 1171 1172 if (StressNonEntrant) { 1173 // Open a big window for deopt failure 1174 __ save_frame(0); 1175 __ mov(G0, L0); 1176 Label loop; 1177 __ bind(loop); 1178 __ sub(L0, 1, L0); 1179 __ br_null(L0, false, Assembler::pt, loop); 1180 __ delayed()->nop(); 1181 1182 __ restore(); 1183 } 1184 1185 1186 __ jmpl(G3, 0, G0); 1187 __ delayed()->nop(); 1188} 1189 1190// --------------------------------------------------------------- 1191AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm, 1192 int total_args_passed, 1193 // VMReg max_arg, 1194 int comp_args_on_stack, // VMRegStackSlots 1195 const BasicType *sig_bt, 1196 const VMRegPair *regs) { 1197 address i2c_entry = __ pc(); 1198 1199 AdapterGenerator agen(masm); 1200 1201 agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs); 1202 1203 1204 // ------------------------------------------------------------------------- 1205 // Generate a C2I adapter. On entry we know G5 holds the methodOop. The 1206 // args start out packed in the compiled layout. They need to be unpacked 1207 // into the interpreter layout. This will almost always require some stack 1208 // space. We grow the current (compiled) stack, then repack the args. We 1209 // finally end in a jump to the generic interpreter entry point. On exit 1210 // from the interpreter, the interpreter will restore our SP (lest the 1211 // compiled code, which relys solely on SP and not FP, get sick). 1212 1213 address c2i_unverified_entry = __ pc(); 1214 Label skip_fixup; 1215 { 1216#if !defined(_LP64) && defined(COMPILER2) 1217 Register R_temp = L0; // another scratch register 1218#else 1219 Register R_temp = G1; // another scratch register 1220#endif 1221 1222 Address ic_miss(G3_scratch, SharedRuntime::get_ic_miss_stub()); 1223 1224 __ verify_oop(O0); 1225 __ verify_oop(G5_method); 1226 __ load_klass(O0, G3_scratch); 1227 __ verify_oop(G3_scratch); 1228 1229#if !defined(_LP64) && defined(COMPILER2) 1230 __ save(SP, -frame::register_save_words*wordSize, SP); 1231 __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp); 1232 __ verify_oop(R_temp); 1233 __ cmp(G3_scratch, R_temp); 1234 __ restore(); 1235#else 1236 __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp); 1237 __ verify_oop(R_temp); 1238 __ cmp(G3_scratch, R_temp); 1239#endif 1240 1241 Label ok, ok2; 1242 __ brx(Assembler::equal, false, Assembler::pt, ok); 1243 __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method); 1244 __ jump_to(ic_miss); 1245 __ delayed()->nop(); 1246 1247 __ bind(ok); 1248 // Method might have been compiled since the call site was patched to 1249 // interpreted if that is the case treat it as a miss so we can get 1250 // the call site corrected. 
    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, __ pt, skip_fixup);
    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
    __ jump_to(ic_miss);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return new AdapterHandlerEntry(i2c_entry, c2i_entry, c2i_unverified_entry);

}

// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack-based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when actually referencing
  // any actual stack location the c calling convention must add in this
  // bias amount to make up for the fact that out_preserve_stack_slots is
  // insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
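// For illustration (explanatory, not original): i in 0..5 yields O0..O5
// directly; i == 6 falls into the memory parameter area and yields a stack
// VMReg deliberately biased down by out_preserve_stack_slots(), so that when
// the shared code adds that bias back (as it does for every stack argument)
// the slot lands on the first memory-parameter word of the frame.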

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        int total_args_passed) {

  // Return the number of VMReg stack_slots needed for the args.
  // This value does not include an abi space (like register window
  // save area).

  // The native convention is V8 if !LP64.
  // The LP64 convention is the V9 convention, which is slightly more sane.

  // We return the amount of VMReg stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots. Since we always
  // have space for storing at least 6 registers to memory we start with that.
  // See int_stk_helper for a further discussion.
  int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
  // V9 convention: All things "as-if" on double-wide stack slots.
  // Hoist any int/ptr/long's in the first 6 to int regs.
  // Hoist any flt/dbl's in the first 16 dbl regs.
  int j = 0;                  // Count of actual args, not HALVES
  for( int i=0; i<total_args_passed; i++, j++ ) {
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_INT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( j ) ); break;
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_OBJECT:
      regs[i].set2( int_stk_helper( j ) );
      break;
    case T_FLOAT:
      if ( j < 16 ) {
        // V9ism: floats go in ODD registers
        regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
      } else {
        // V9ism: floats go in ODD stack slot
        regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
      }
      break;
    case T_DOUBLE:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      if ( j < 16 ) {
        // V9ism: doubles go in EVEN/ODD regs
        regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
      } else {
        // V9ism: doubles go in EVEN/ODD stack slots
        regs[i].set2(VMRegImpl::stack2reg(j<<1));
      }
      break;
    case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }

#else // _LP64
  // V8 convention: first 6 things in O-regs, rest on stack.
  // Alignment is willy-nilly.
  for( int i=0; i<total_args_passed; i++ ) {
    switch( sig_bt[i] ) {
    case T_ADDRESS: // raw pointers, like current thread, for VM calls
    case T_ARRAY:
    case T_BOOLEAN:
    case T_BYTE:
    case T_CHAR:
    case T_FLOAT:
    case T_INT:
    case T_OBJECT:
    case T_SHORT:
      regs[i].set1( int_stk_helper( i ) );
      break;
    case T_DOUBLE:
    case T_LONG:
      assert( sig_bt[i+1] == T_VOID, "expecting half" );
      regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
    }
    if (regs[i].first()->is_stack()) {
      int off = regs[i].first()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
    if (regs[i].second()->is_stack()) {
      int off = regs[i].second()->reg2stack();
      if (off > max_stack_slots) max_stack_slots = off;
    }
  }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
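// Worked example of the V9 rules above (illustrative): for a native
// signature (jint, jfloat, jdouble) the loop assigns
//   jint    -> O0     (int_stk_helper(0))
//   jfloat  -> F3     (odd single: 1 + (1<<1))
//   jdouble -> F4:F5  (even/odd pair: 2<<1)
// and no stack slots beyond the reserved area are touched.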

// ---------------------------------------------------------------------------
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check and forward any pending exception. Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread. Since this is a native call,
// there is no exception handler. We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function. Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  Address exception_entry(G3_scratch, StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of integer like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64-bit we will store integer like items to the stack as
// 64-bit items (sparc abi) even though java would only store
// 32 bits for a parameter. On 32-bit it will simply be 32 bits.
// So this routine will do 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack
L5 : dst.first()->as_Register(); 1511 __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle); 1512 __ ld_ptr(rHandle, 0, L4); 1513#ifdef _LP64 1514 __ movr( Assembler::rc_z, L4, G0, rHandle ); 1515#else 1516 __ tst( L4 ); 1517 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle ); 1518#endif 1519 if (dst.first()->is_stack()) { 1520 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS); 1521 } 1522 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 1523 if (is_receiver) { 1524 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 1525 } 1526 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 1527 } else { 1528 // Oop is in an input register; we must flush it to the stack 1529 const Register rOop = src.first()->as_Register(); 1530 const Register rHandle = L5; 1531 int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset; 1532 int offset = oop_slot*VMRegImpl::stack_slot_size; 1533 Label skip; 1534 __ st_ptr(rOop, SP, offset + STACK_BIAS); 1535 if (is_receiver) { 1536 *receiver_offset = oop_slot * VMRegImpl::stack_slot_size; 1537 } 1538 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 1539 __ add(SP, offset + STACK_BIAS, rHandle); 1540#ifdef _LP64 1541 __ movr( Assembler::rc_z, rOop, G0, rHandle ); 1542#else 1543 __ tst( rOop ); 1544 __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle ); 1545#endif 1546 1547 if (dst.first()->is_stack()) { 1548 __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS); 1549 } else { 1550 __ mov(rHandle, dst.first()->as_Register()); 1551 } 1552 } 1553} 1554 1555// A float arg may have to do a float reg -> int reg conversion 1556static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1557 assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move"); 1558 1559 if (src.first()->is_stack()) { 1560 if (dst.first()->is_stack()) { 1561 // stack to stack the easiest of the bunch 1562 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5); 1563 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS); 1564 } else { 1565 // stack to reg 1566 if (dst.first()->is_Register()) { 1567 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register()); 1568 } else { 1569 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister()); 1570 } 1571 } 1572 } else if (dst.first()->is_stack()) { 1573 // reg to stack 1574 if (src.first()->is_Register()) { 1575 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS); 1576 } else { 1577 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS); 1578 } 1579 } else { 1580 // reg to reg 1581 if (src.first()->is_Register()) { 1582 if (dst.first()->is_Register()) { 1583 // gpr -> gpr 1584 __ mov(src.first()->as_Register(), dst.first()->as_Register()); 1585 } else { 1586 // gpr -> fpr 1587 __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS); 1588 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister()); 1589 } 1590 } else if (dst.first()->is_Register()) { 1591 // fpr -> gpr 1592 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS); 1593 __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register()); 1594 } else { 1595 // fpr -> fpr 1596 // In theory these overlap but the ordering is such that this is likely a nop 1597 if ( src.first() != dst.first()) { 1598 __
fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister()); 1599 } 1600 } 1601 } 1602} 1603 1604static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1605 VMRegPair src_lo(src.first()); 1606 VMRegPair src_hi(src.second()); 1607 VMRegPair dst_lo(dst.first()); 1608 VMRegPair dst_hi(dst.second()); 1609 simple_move32(masm, src_lo, dst_lo); 1610 simple_move32(masm, src_hi, dst_hi); 1611} 1612 1613// A long move 1614static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1615 1616 // Do the simple ones here else do two int moves 1617 if (src.is_single_phys_reg() ) { 1618 if (dst.is_single_phys_reg()) { 1619 __ mov(src.first()->as_Register(), dst.first()->as_Register()); 1620 } else { 1621 // split src into two separate registers 1622 // Remember hi means hi address or lsw on sparc 1623 // Move msw to lsw 1624 if (dst.second()->is_reg()) { 1625 // MSW -> MSW 1626 __ srax(src.first()->as_Register(), 32, dst.first()->as_Register()); 1627 // Now LSW -> LSW 1628 // this will only move lo -> lo and ignore hi 1629 VMRegPair split(dst.second()); 1630 simple_move32(masm, src, split); 1631 } else { 1632 VMRegPair split(src.first(), L4->as_VMReg()); 1633 // MSW -> MSW (lo ie. first word) 1634 __ srax(src.first()->as_Register(), 32, L4); 1635 split_long_move(masm, split, dst); 1636 } 1637 } 1638 } else if (dst.is_single_phys_reg()) { 1639 if (src.is_adjacent_aligned_on_stack(2)) { 1640 __ ld_long(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register()); 1641 } else { 1642 // dst is a single reg. 1643 // Remember lo is low address not msb for stack slots 1644 // and lo is the "real" register for registers 1645 // src is 1646 1647 VMRegPair split; 1648 1649 if (src.first()->is_reg()) { 1650 // src.lo (msw) is a reg, src.hi is stk/reg 1651 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg] 1652 split.set_pair(dst.first(), src.first()); 1653 } else { 1654 // msw is stack move to L5 1655 // lsw is stack move to dst.lo (real reg) 1656 // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5 1657 split.set_pair(dst.first(), L5->as_VMReg()); 1658 } 1659 1660 // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg) 1661 // msw -> src.lo/L5, lsw -> dst.lo 1662 split_long_move(masm, src, split); 1663 1664 // So dst now has the low order correct position the 1665 // msw half 1666 __ sllx(split.first()->as_Register(), 32, L5); 1667 1668 const Register d = dst.first()->as_Register(); 1669 __ or3(L5, d, d); 1670 } 1671 } else { 1672 // For LP64 we can probably do better. 1673 split_long_move(masm, src, dst); 1674 } 1675} 1676 1677// A double move 1678static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) { 1679 1680 // The painful thing here is that like long_move a VMRegPair might be 1681 // 1: a single physical register 1682 // 2: two physical registers (v8) 1683 // 3: a physical reg [lo] and a stack slot [hi] (v8) 1684 // 4: two stack slots 1685 1686 // Since src is always a java calling convention we know that the src pair 1687 // is always either all registers or all stack (and aligned?) 
1688 1689 // The dst can also be split: in a register [lo] and a stack slot [hi]. 1690 if (src.first()->is_stack()) { 1691 if (dst.first()->is_stack()) { 1692 // stack to stack the easiest of the bunch 1693 // There ought to be a way to use ldd/std here when the alignment is ok. 1694 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5); 1695 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4); 1696 __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS); 1697 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS); 1698 } else { 1699 // stack to reg 1700 if (dst.second()->is_stack()) { 1701 // stack -> reg, stack -> stack 1702 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4); 1703 if (dst.first()->is_Register()) { 1704 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register()); 1705 } else { 1706 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister()); 1707 } 1708 // This was missing. (very rare case) 1709 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS); 1710 } else { 1711 // stack -> reg 1712 // Eventually optimize for alignment QQQ 1713 if (dst.first()->is_Register()) { 1714 __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register()); 1715 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register()); 1716 } else { 1717 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister()); 1718 __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister()); 1719 } 1720 } 1721 } 1722 } else if (dst.first()->is_stack()) { 1723 // reg to stack 1724 if (src.first()->is_Register()) { 1725 // Eventually optimize for alignment QQQ 1726 __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS); 1727 if (src.second()->is_stack()) { 1728 __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4); 1729 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS); 1730 } else { 1731 __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS); 1732 } 1733 } else { 1734 // fpr to stack 1735 if (src.second()->is_stack()) { 1736 ShouldNotReachHere(); 1737 } else { 1738 // Is the stack aligned?
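        // (reg2offset() yields a byte offset from the frame; an 8-byte
        // stf(FloatRegisterImpl::D, ...) store needs 8-byte alignment, so an
        // offset with any of the low three bits set is split below into two
        // 4-byte stores.)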
1739 if (reg2offset(dst.first()) & 0x7) { 1740 // No do as pairs 1741 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS); 1742 __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS); 1743 } else { 1744 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS); 1745 } 1746 } 1747 } 1748 } else { 1749 // reg to reg 1750 if (src.first()->is_Register()) { 1751 if (dst.first()->is_Register()) { 1752 // gpr -> gpr 1753 __ mov(src.first()->as_Register(), dst.first()->as_Register()); 1754 __ mov(src.second()->as_Register(), dst.second()->as_Register()); 1755 } else { 1756 // gpr -> fpr 1757 // ought to be able to do a single store 1758 __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS); 1759 __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS); 1760 // ought to be able to do a single load 1761 __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister()); 1762 __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister()); 1763 } 1764 } else if (dst.first()->is_Register()) { 1765 // fpr -> gpr 1766 // ought to be able to do a single store 1767 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS); 1768 // ought to be able to do a single load 1769 // REMEMBER first() is low address not LSB 1770 __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register()); 1771 if (dst.second()->is_Register()) { 1772 __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register()); 1773 } else { 1774 __ ld(FP, -4 + STACK_BIAS, L4); 1775 __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS); 1776 } 1777 } else { 1778 // fpr -> fpr 1779 // In theory these overlap but the ordering is such that this is likely a nop 1780 if ( src.first() != dst.first()) { 1781 __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister()); 1782 } 1783 } 1784 } 1785} 1786 1787// Creates an inner frame if one hasn't already been created, and 1788// saves a copy of the thread in L7_thread_cache 1789static void create_inner_frame(MacroAssembler* masm, bool* already_created) { 1790 if (!*already_created) { 1791 __ save_frame(0); 1792 // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below 1793 // Don't use save_thread because it smashes G2 and we merely want to save a 1794 // copy 1795 __ mov(G2_thread, L7_thread_cache); 1796 *already_created = true; 1797 } 1798} 1799 1800// --------------------------------------------------------------------------- 1801// Generate a native wrapper for a given method. The method takes arguments 1802// in the Java compiled code convention, marshals them to the native 1803// convention (handlizes oops, etc), transitions to native, makes the call, 1804// returns to java state (possibly blocking), unhandlizes any result and 1805// returns. 1806nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm, 1807 methodHandle method, 1808 int total_in_args, 1809 int comp_args_on_stack, // in VMRegStackSlots 1810 BasicType *in_sig_bt, 1811 VMRegPair *in_regs, 1812 BasicType ret_type) { 1813 1814 1815 // Native nmethod wrappers never take possesion of the oop arguments. 1816 // So the caller will gc the arguments. 
The only thing we need an 1817 // oopMap for is if the call is static 1818 // 1819 // An OopMap for lock (and class if static), and one for the VM call itself 1820 OopMapSet *oop_maps = new OopMapSet(); 1821 intptr_t start = (intptr_t)__ pc(); 1822 1823 // First thing make an ic check to see if we should even be here 1824 { 1825 Label L; 1826 const Register temp_reg = G3_scratch; 1827 Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub()); 1828 __ verify_oop(O0); 1829 __ load_klass(O0, temp_reg); 1830 __ cmp(temp_reg, G5_inline_cache_reg); 1831 __ brx(Assembler::equal, true, Assembler::pt, L); 1832 __ delayed()->nop(); 1833 1834 __ jump_to(ic_miss, 0); 1835 __ delayed()->nop(); 1836 __ align(CodeEntryAlignment); 1837 __ bind(L); 1838 } 1839 1840 int vep_offset = ((intptr_t)__ pc()) - start; 1841 1842#ifdef COMPILER1 1843 if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) { 1844 // Object.hashCode can pull the hashCode from the header word 1845 // instead of doing a full VM transition once it's been computed. 1846 // Since hashCode is usually polymorphic at call sites we can't do 1847 // this optimization at the call site without a lot of work. 1848 Label slowCase; 1849 Register receiver = O0; 1850 Register result = O0; 1851 Register header = G3_scratch; 1852 Register hash = G3_scratch; // overwrite header value with hash value 1853 Register mask = G1; // to get hash field from header 1854 1855 // Read the header and build a mask to get its hash field. Give up if the object is not unlocked. 1856 // We depend on hash_mask being at most 32 bits and avoid the use of 1857 // hash_mask_in_place because it could be larger than 32 bits in a 64-bit 1858 // vm: see markOop.hpp. 1859 __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header); 1860 __ sethi(markOopDesc::hash_mask, mask); 1861 __ btst(markOopDesc::unlocked_value, header); 1862 __ br(Assembler::zero, false, Assembler::pn, slowCase); 1863 if (UseBiasedLocking) { 1864 // Check if biased and fall through to runtime if so 1865 __ delayed()->nop(); 1866 __ btst(markOopDesc::biased_lock_bit_in_place, header); 1867 __ br(Assembler::notZero, false, Assembler::pn, slowCase); 1868 } 1869 __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask); 1870 1871 // Check for a valid (non-zero) hash code and get its value. 1872#ifdef _LP64 1873 __ srlx(header, markOopDesc::hash_shift, hash); 1874#else 1875 __ srl(header, markOopDesc::hash_shift, hash); 1876#endif 1877 __ andcc(hash, mask, hash); 1878 __ br(Assembler::equal, false, Assembler::pn, slowCase); 1879 __ delayed()->nop(); 1880 1881 // leaf return. 1882 __ retl(); 1883 __ delayed()->mov(hash, result); 1884 __ bind(slowCase); 1885 } 1886#endif // COMPILER1 1887 1888 1889 // We have received a description of where all the java arg are located 1890 // on entry to the wrapper. We need to convert these args to where 1891 // the jni function will expect them. 
To figure out where they go 1892 // we convert the java signature to a C signature by inserting 1893 // the hidden arguments as arg[0] and possibly arg[1] (static method). 1894 1895 int total_c_args = total_in_args + 1; 1896 if (method->is_static()) { 1897 total_c_args++; 1898 } 1899 1900 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args); 1901 VMRegPair * out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args); 1902 1903 int argc = 0; 1904 out_sig_bt[argc++] = T_ADDRESS; 1905 if (method->is_static()) { 1906 out_sig_bt[argc++] = T_OBJECT; 1907 } 1908 1909 for (int i = 0; i < total_in_args ; i++ ) { 1910 out_sig_bt[argc++] = in_sig_bt[i]; 1911 } 1912 1913 // Now figure out where the args must be stored and how much stack space 1914 // they require (neglecting out_preserve_stack_slots but including space 1915 // for storing the 1st six register arguments). It's weird; see int_stk_helper. 1916 // 1917 int out_arg_slots; 1918 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args); 1919 1920 // Compute framesize for the wrapper. We need to handlize all oops in 1921 // registers. We must create space for them here that is disjoint from 1922 // the windowed save area because we have no control over when we might 1923 // flush the window again and overwrite values that gc has since modified. 1924 // (The live window race) 1925 // 1926 // We always just allocate 6 words for storing down these objects. This allows 1927 // us to simply record the base and use the Ireg number to decide which 1928 // slot to use. (Note that the reg number is the inbound number, not the 1929 // outbound number). 1930 // We must shuffle args to match the native convention, and include var-args space. 1931 1932 // Calculate the total number of stack slots we will need.
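  // A worked example of the accounting below (hypothetical: a static
  // synchronized method on a 64-bit build):
  //   out_preserve_stack_slots() + out_arg_slots   abi area + outgoing args
  //   + 6*VMRegImpl::slots_per_word                oop handle area
  //   + VMRegImpl::slots_per_word                  handlized klass mirror
  //   + VMRegImpl::slots_per_word                  lock box
  //   + 2                                          result/move temp
  // all rounded up to a multiple of 2*VMRegImpl::slots_per_word, which on
  // a 64-bit build keeps the frame 16-byte aligned.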
1933 1934 // First count the abi requirement plus all of the outgoing args 1935 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 1936 1937 // Now the space for the inbound oop handle area 1938 1939 int oop_handle_offset = stack_slots; 1940 stack_slots += 6*VMRegImpl::slots_per_word; 1941 1942 // Now any space we need for handlizing a klass if static method 1943 1944 int oop_temp_slot_offset = 0; 1945 int klass_slot_offset = 0; 1946 int klass_offset = -1; 1947 int lock_slot_offset = 0; 1948 bool is_static = false; 1949 1950 if (method->is_static()) { 1951 klass_slot_offset = stack_slots; 1952 stack_slots += VMRegImpl::slots_per_word; 1953 klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size; 1954 is_static = true; 1955 } 1956 1957 // Plus a lock if needed 1958 1959 if (method->is_synchronized()) { 1960 lock_slot_offset = stack_slots; 1961 stack_slots += VMRegImpl::slots_per_word; 1962 } 1963 1964 // Now a place to save return value or as a temporary for any gpr -> fpr moves 1965 stack_slots += 2; 1966 1967 // Ok The space we have allocated will look like: 1968 // 1969 // 1970 // FP-> | | 1971 // |---------------------| 1972 // | 2 slots for moves | 1973 // |---------------------| 1974 // | lock box (if sync) | 1975 // |---------------------| <- lock_slot_offset 1976 // | klass (if static) | 1977 // |---------------------| <- klass_slot_offset 1978 // | oopHandle area | 1979 // |---------------------| <- oop_handle_offset 1980 // | outbound memory | 1981 // | based arguments | 1982 // | | 1983 // |---------------------| 1984 // | vararg area | 1985 // |---------------------| 1986 // | | 1987 // SP-> | out_preserved_slots | 1988 // 1989 // 1990 1991 1992 // Now compute actual number of stack words we need rounding to make 1993 // stack properly aligned. 1994 stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word); 1995 1996 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 1997 1998 // Generate stack overflow check before creating frame 1999 __ generate_stack_overflow_check(stack_size); 2000 2001 // Generate a new frame for the wrapper. 2002 __ save(SP, -stack_size, SP); 2003 2004 int frame_complete = ((intptr_t)__ pc()) - start; 2005 2006 __ verify_thread(); 2007 2008 2009 // 2010 // We immediately shuffle the arguments so that any vm call we have to 2011 // make from here on out (sync slow path, jvmti, etc.) we will have 2012 // captured the oops from our caller and have a valid oopMap for 2013 // them. 2014 2015 // ----------------- 2016 // The Grand Shuffle 2017 // 2018 // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv* 2019 // (derived from JavaThread* which is in L7_thread_cache) and, if static, 2020 // the class mirror instead of a receiver. This pretty much guarantees that 2021 // register layout will not match. We ignore these extra arguments during 2022 // the shuffle. The shuffle is described by the two calling convention 2023 // vectors we have in our possession. We simply walk the java vector to 2024 // get the source locations and the c vector to get the destinations. 2025 // Because we have a new window and the argument registers are completely 2026 // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about 2027 // here. 2028 2029 // This is a trick. We double the stack slots so we can claim 2030 // the oops in the caller's frame. 
Since we are sure to have 2031 // more args than the caller doubling is enough to make 2032 // sure we can capture all the incoming oop args from the 2033 // caller. 2034 // 2035 OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/); 2036 int c_arg = total_c_args - 1; 2037 // Record sp-based slot for receiver on stack for non-static methods 2038 int receiver_offset = -1; 2039 2040 // We move the arguments backward because the floating point registers 2041 // destination will always be to a register with a greater or equal register 2042 // number or the stack. 2043 2044#ifdef ASSERT 2045 bool reg_destroyed[RegisterImpl::number_of_registers]; 2046 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2047 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2048 reg_destroyed[r] = false; 2049 } 2050 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 2051 freg_destroyed[f] = false; 2052 } 2053 2054#endif /* ASSERT */ 2055 2056 for ( int i = total_in_args - 1; i >= 0 ; i--, c_arg-- ) { 2057 2058#ifdef ASSERT 2059 if (in_regs[i].first()->is_Register()) { 2060 assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!"); 2061 } else if (in_regs[i].first()->is_FloatRegister()) { 2062 assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!"); 2063 } 2064 if (out_regs[c_arg].first()->is_Register()) { 2065 reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true; 2066 } else if (out_regs[c_arg].first()->is_FloatRegister()) { 2067 freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true; 2068 } 2069#endif /* ASSERT */ 2070 2071 switch (in_sig_bt[i]) { 2072 case T_ARRAY: 2073 case T_OBJECT: 2074 object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg], 2075 ((i == 0) && (!is_static)), 2076 &receiver_offset); 2077 break; 2078 case T_VOID: 2079 break; 2080 2081 case T_FLOAT: 2082 float_move(masm, in_regs[i], out_regs[c_arg]); 2083 break; 2084 2085 case T_DOUBLE: 2086 assert( i + 1 < total_in_args && 2087 in_sig_bt[i + 1] == T_VOID && 2088 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 2089 double_move(masm, in_regs[i], out_regs[c_arg]); 2090 break; 2091 2092 case T_LONG : 2093 long_move(masm, in_regs[i], out_regs[c_arg]); 2094 break; 2095 2096 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 2097 2098 default: 2099 move32_64(masm, in_regs[i], out_regs[c_arg]); 2100 } 2101 } 2102 2103 // Pre-load a static method's oop into O1. Used both by locking code and 2104 // the normal JNI call code. 2105 if (method->is_static()) { 2106 __ set_oop_constant(JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()), O1); 2107 2108 // Now handlize the static class mirror in O1. It's known not-null. 2109 __ st_ptr(O1, SP, klass_offset + STACK_BIAS); 2110 map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 2111 __ add(SP, klass_offset + STACK_BIAS, O1); 2112 } 2113 2114 2115 const Register L6_handle = L6; 2116 2117 if (method->is_synchronized()) { 2118 __ mov(O1, L6_handle); 2119 } 2120 2121 // We have all of the arguments setup at this point. We MUST NOT touch any Oregs 2122 // except O6/O7. So if we must call out we must push a new frame. We immediately 2123 // push a new frame and flush the windows. 
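  // The pc materialized just below (a call to the next instruction on
  // 64 bit, load_pc_address on 32 bit) lands in O7 and is reused as the
  // gc-map pc for every runtime call this wrapper makes, so the single
  // oopMap built above can serve them all.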
2124 2125#ifdef _LP64 2126 intptr_t thepc = (intptr_t) __ pc(); 2127 { 2128 address here = __ pc(); 2129 // Call the next instruction 2130 __ call(here + 8, relocInfo::none); 2131 __ delayed()->nop(); 2132 } 2133#else 2134 intptr_t thepc = __ load_pc_address(O7, 0); 2135#endif /* _LP64 */ 2136 2137 // We use the same pc/oopMap repeatedly when we call out 2138 oop_maps->add_gc_map(thepc - start, map); 2139 2140 // O7 now has the pc loaded that we will use when we finally call to native. 2141 2142 // Save thread in L7; it crosses a bunch of VM calls below 2143 // Don't use save_thread because it smashes G2 and we merely 2144 // want to save a copy 2145 __ mov(G2_thread, L7_thread_cache); 2146 2147 2148 // If we create an inner frame once is plenty 2149 // when we create it we must also save G2_thread 2150 bool inner_frame_created = false; 2151 2152 // dtrace method entry support 2153 { 2154 SkipIfEqual skip_if( 2155 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero); 2156 // create inner frame 2157 __ save_frame(0); 2158 __ mov(G2_thread, L7_thread_cache); 2159 __ set_oop_constant(JNIHandles::make_local(method()), O1); 2160 __ call_VM_leaf(L7_thread_cache, 2161 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), 2162 G2_thread, O1); 2163 __ restore(); 2164 } 2165 2166 // We are in the jni frame unless saved_frame is true in which case 2167 // we are in one frame deeper (the "inner" frame). If we are in the 2168 // "inner" frames the args are in the Iregs and if the jni frame then 2169 // they are in the Oregs. 2170 // If we ever need to go to the VM (for locking, jvmti) then 2171 // we will always be in the "inner" frame. 2172 2173 // Lock a synchronized method 2174 int lock_offset = -1; // Set if locked 2175 if (method->is_synchronized()) { 2176 Register Roop = O1; 2177 const Register L3_box = L3; 2178 2179 create_inner_frame(masm, &inner_frame_created); 2180 2181 __ ld_ptr(I1, 0, O1); 2182 Label done; 2183 2184 lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size); 2185 __ add(FP, lock_offset+STACK_BIAS, L3_box); 2186#ifdef ASSERT 2187 if (UseBiasedLocking) { 2188 // making the box point to itself will make it clear it went unused 2189 // but also be obviously invalid 2190 __ st_ptr(L3_box, L3_box, 0); 2191 } 2192#endif // ASSERT 2193 // 2194 // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch 2195 // 2196 __ compiler_lock_object(Roop, L1, L3_box, L2); 2197 __ br(Assembler::equal, false, Assembler::pt, done); 2198 __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box); 2199 2200 2201 // None of the above fast optimizations worked so we have to get into the 2202 // slow case of monitor enter. Inline a special case of call_VM that 2203 // disallows any pending_exception. 2204 __ mov(Roop, O0); // Need oop in O0 2205 __ mov(L3_box, O1); 2206 2207 // Record last_Java_sp, in case the VM code releases the JVM lock. 
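    // (set_last_Java_frame below publishes this frame's sp and pc as the
    // last-Java-frame anchor so the VM can walk the stack from inside
    // complete_monitor_locking_C.)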
2208 2209 __ set_last_Java_frame(FP, I7); 2210 2211 // do the call 2212 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type); 2213 __ delayed()->mov(L7_thread_cache, O2); 2214 2215 __ restore_thread(L7_thread_cache); // restore G2_thread 2216 __ reset_last_Java_frame(); 2217 2218#ifdef ASSERT 2219 { Label L; 2220 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0); 2221 __ br_null(O0, false, Assembler::pt, L); 2222 __ delayed()->nop(); 2223 __ stop("no pending exception allowed on exit from IR::monitorenter"); 2224 __ bind(L); 2225 } 2226#endif 2227 __ bind(done); 2228 } 2229 2230 2231 // Finally just about ready to make the JNI call 2232 2233 __ flush_windows(); 2234 if (inner_frame_created) { 2235 __ restore(); 2236 } else { 2237 // Store only what we need from this frame 2238 // QQQ I think that non-v9 (like we care) we don't need these saves 2239 // either as the flush traps and the current window goes too. 2240 __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS); 2241 __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS); 2242 } 2243 2244 // get JNIEnv* which is first argument to native 2245 2246 __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0); 2247 2248 // Use that pc we placed in O7 a while back as the current frame anchor 2249 2250 __ set_last_Java_frame(SP, O7); 2251 2252 // Transition from _thread_in_Java to _thread_in_native. 2253 __ set(_thread_in_native, G3_scratch); 2254 __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset())); 2255 2256 // We flushed the windows ages ago now mark them as flushed 2257 2258 // mark windows as flushed 2259 __ set(JavaFrameAnchor::flushed, G3_scratch); 2260 2261 Address flags(G2_thread, 2262 0, 2263 in_bytes(JavaThread::frame_anchor_offset()) + in_bytes(JavaFrameAnchor::flags_offset())); 2264 2265#ifdef _LP64 2266 Address dest(O7, method->native_function()); 2267 __ relocate(relocInfo::runtime_call_type); 2268 __ jumpl_to(dest, O7); 2269#else 2270 __ call(method->native_function(), relocInfo::runtime_call_type); 2271#endif 2272 __ delayed()->st(G3_scratch, flags); 2273 2274 __ restore_thread(L7_thread_cache); // restore G2_thread 2275 2276 // Unpack native results. For int-types, we do any needed sign-extension 2277 // and move things into I0. The return value there will survive any VM 2278 // calls for blocking or unlocking. An FP or OOP result (handle) is done 2279 // specially in the slow-path code. 2280 switch (ret_type) { 2281 case T_VOID: break; // Nothing to do! 2282 case T_FLOAT: break; // Got it where we want it (unless slow-path) 2283 case T_DOUBLE: break; // Got it where we want it (unless slow-path) 2284 // In 64 bits build result is in O0, in O0, O1 in 32bit build 2285 case T_LONG: 2286#ifndef _LP64 2287 __ mov(O1, I1); 2288#endif 2289 // Fall thru 2290 case T_OBJECT: // Really a handle 2291 case T_ARRAY: 2292 case T_INT: 2293 __ mov(O0, I0); 2294 break; 2295 case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false 2296 case T_BYTE : __ sll(O0, 24, O0); __ sra(O0, 24, I0); break; 2297 case T_CHAR : __ sll(O0, 16, O0); __ srl(O0, 16, I0); break; // cannot use and3, 0xFFFF too big as immediate value! 2298 case T_SHORT : __ sll(O0, 16, O0); __ sra(O0, 16, I0); break; 2299 break; // Cannot de-handlize until after reclaiming jvm_lock 2300 default: 2301 ShouldNotReachHere(); 2302 } 2303 2304 // must we block? 
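  // The protocol below is _thread_in_native -> _thread_in_native_trans ->
  // (block if a safepoint or suspend request is pending) -> _thread_in_Java.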
2305 2306 // Block, if necessary, before resuming in _thread_in_Java state. 2307 // In order for GC to work, don't clear the last_Java_sp until after blocking. 2308 { Label no_block; 2309 Address sync_state(G3_scratch, SafepointSynchronize::address_of_state()); 2310 2311 // Switch thread to "native transition" state before reading the synchronization state. 2312 // This additional state is necessary because reading and testing the synchronization 2313 // state is not atomic w.r.t. GC, as this scenario demonstrates: 2314 // Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted. 2315 // VM thread changes sync state to synchronizing and suspends threads for GC. 2316 // Thread A is resumed to finish this native method, but doesn't block here since it 2317 // didn't see any synchronization in progress, and escapes. 2318 __ set(_thread_in_native_trans, G3_scratch); 2319 __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset())); 2320 if(os::is_MP()) { 2321 if (UseMembar) { 2322 // Force this write out before the read below 2323 __ membar(Assembler::StoreLoad); 2324 } else { 2325 // Write serialization page so VM thread can do a pseudo remote membar. 2326 // We use the current thread pointer to calculate a thread specific 2327 // offset to write to within the page. This minimizes bus traffic 2328 // due to cache line collision. 2329 __ serialize_memory(G2_thread, G1_scratch, G3_scratch); 2330 } 2331 } 2332 __ load_contents(sync_state, G3_scratch); 2333 __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized); 2334 2335 Label L; 2336 Address suspend_state(G2_thread, 0, in_bytes(JavaThread::suspend_flags_offset())); 2337 __ br(Assembler::notEqual, false, Assembler::pn, L); 2338 __ delayed()-> 2339 ld(suspend_state, G3_scratch); 2340 __ cmp(G3_scratch, 0); 2341 __ br(Assembler::equal, false, Assembler::pt, no_block); 2342 __ delayed()->nop(); 2343 __ bind(L); 2344 2345 // Block. Save any potential method result value before the operation and 2346 // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this 2347 // lets us share the oopMap we used when we went native rather than create 2348 // a distinct one for this pc. 2349 // 2350 save_native_result(masm, ret_type, stack_slots); 2351 __ call_VM_leaf(L7_thread_cache, 2352 CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), 2353 G2_thread); 2354 2355 // Restore any method result value 2356 restore_native_result(masm, ret_type, stack_slots); 2357 __ bind(no_block); 2358 } 2359 2360 // thread state is thread_in_native_trans. Any safepoint blocking has already 2361 // happened so we can now change state to _thread_in_Java.
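  // Once back in Java state we may also need to re-arm the stack yellow
  // zone below, if a stack overflow while we were in native code left it
  // disabled.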
2362 2363 2364 __ set(_thread_in_Java, G3_scratch); 2365 __ st(G3_scratch, G2_thread, in_bytes(JavaThread::thread_state_offset())); 2366 2367 2368 Label no_reguard; 2369 __ ld(G2_thread, in_bytes(JavaThread::stack_guard_state_offset()), G3_scratch); 2370 __ cmp(G3_scratch, JavaThread::stack_guard_yellow_disabled); 2371 __ br(Assembler::notEqual, false, Assembler::pt, no_reguard); 2372 __ delayed()->nop(); 2373 2374 save_native_result(masm, ret_type, stack_slots); 2375 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)); 2376 __ delayed()->nop(); 2377 2378 __ restore_thread(L7_thread_cache); // restore G2_thread 2379 restore_native_result(masm, ret_type, stack_slots); 2380 2381 __ bind(no_reguard); 2382 2383 // Handle possible exception (will unlock if necessary) 2384 2385 // native result if any is live in freg or I0 (and I1 if long and 32bit vm) 2386 2387 // Unlock 2388 if (method->is_synchronized()) { 2389 Label done; 2390 Register I2_ex_oop = I2; 2391 const Register L3_box = L3; 2392 // Get locked oop from the handle we passed to jni 2393 __ ld_ptr(L6_handle, 0, L4); 2394 __ add(SP, lock_offset+STACK_BIAS, L3_box); 2395 // Must save pending exception around the slow-path VM call. Since it's a 2396 // leaf call, the pending exception (if any) can be kept in a register. 2397 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop); 2398 // Now unlock 2399 // (Roop, Rmark, Rbox, Rscratch) 2400 __ compiler_unlock_object(L4, L1, L3_box, L2); 2401 __ br(Assembler::equal, false, Assembler::pt, done); 2402 __ delayed()-> add(SP, lock_offset+STACK_BIAS, L3_box); 2403 2404 // save and restore any potential method result value around the unlocking 2405 // operation. Will save in I0 (or stack for FP returns). 2406 save_native_result(masm, ret_type, stack_slots); 2407 2408 // Must clear pending-exception before re-entering the VM. Since this is 2409 // a leaf call, pending-exception-oop can be safely kept in a register. 2410 __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset())); 2411 2412 // Slow case of monitor exit. Inline a special case of call_VM that 2413 // disallows any pending_exception. 2414 __ mov(L3_box, O1); 2415 2416 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type); 2417 __ delayed()->mov(L4, O0); // Need oop in O0 2418 2419 __ restore_thread(L7_thread_cache); // restore G2_thread 2420 2421#ifdef ASSERT 2422 { Label L; 2423 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0); 2424 __ br_null(O0, false, Assembler::pt, L); 2425 __ delayed()->nop(); 2426 __ stop("no pending exception allowed on exit from IR::monitorexit"); 2427 __ bind(L); 2428 } 2429#endif 2430 restore_native_result(masm, ret_type, stack_slots); 2431 // check_forward_pending_exception jumps to forward_exception if any pending 2432 // exception is set. The forward_exception routine expects to see the 2433 // exception in pending_exception and not in a register. Kind of clumsy, 2434 // since all folks who branch to forward_exception must have tested 2435 // pending_exception first and hence have it in a register already.
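    // Restore the pending exception we preserved in I2_ex_oop across the
    // unlock; the check further down will forward it if it is non-null.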
2436 __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset())); 2437 __ bind(done); 2438 } 2439 2440 // Tell dtrace about this method exit 2441 { 2442 SkipIfEqual skip_if( 2443 masm, G3_scratch, &DTraceMethodProbes, Assembler::zero); 2444 save_native_result(masm, ret_type, stack_slots); 2445 __ set_oop_constant(JNIHandles::make_local(method()), O1); 2446 __ call_VM_leaf(L7_thread_cache, 2447 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), 2448 G2_thread, O1); 2449 restore_native_result(masm, ret_type, stack_slots); 2450 } 2451 2452 // Clear "last Java frame" SP and PC. 2453 __ verify_thread(); // G2_thread must be correct 2454 __ reset_last_Java_frame(); 2455 2456 // Unpack oop result 2457 if (ret_type == T_OBJECT || ret_type == T_ARRAY) { 2458 Label L; 2459 __ addcc(G0, I0, G0); 2460 __ brx(Assembler::notZero, true, Assembler::pt, L); 2461 __ delayed()->ld_ptr(I0, 0, I0); 2462 __ mov(G0, I0); 2463 __ bind(L); 2464 __ verify_oop(I0); 2465 } 2466 2467 // reset handle block 2468 __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5); 2469 __ st_ptr(G0, L5, JNIHandleBlock::top_offset_in_bytes()); 2470 2471 __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch); 2472 check_forward_pending_exception(masm, G3_scratch); 2473 2474 2475 // Return 2476 2477#ifndef _LP64 2478 if (ret_type == T_LONG) { 2479 2480 // Must leave proper result in O0,O1 and G1 (c2/tiered only) 2481 __ sllx(I0, 32, G1); // Shift bits into high G1 2482 __ srl (I1, 0, I1); // Zero extend O1 (harmless?) 2483 __ or3 (I1, G1, G1); // OR 64 bits into G1 2484 } 2485#endif 2486 2487 __ ret(); 2488 __ delayed()->restore(); 2489 2490 __ flush(); 2491 2492 nmethod *nm = nmethod::new_native_nmethod(method, 2493 masm->code(), 2494 vep_offset, 2495 frame_complete, 2496 stack_slots / VMRegImpl::slots_per_word, 2497 (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)), 2498 in_ByteSize(lock_offset), 2499 oop_maps); 2500 return nm; 2501 2502} 2503 2504#ifdef HAVE_DTRACE_H 2505// --------------------------------------------------------------------------- 2506// Generate a dtrace nmethod for a given signature. The method takes arguments 2507// in the Java compiled code convention, marshals them to the native 2508// abi and then leaves nops at the position you would expect to call a native 2509// function. When the probe is enabled the nops are replaced with a trap 2510// instruction that dtrace inserts and the trace will cause a notification 2511// to dtrace. 2512// 2513// The probes are only able to take primitive types and java/lang/String as 2514// arguments. No other java types are allowed. Strings are converted to utf8 2515// strings so that from dtrace point of view java strings are converted to C 2516// strings. There is an arbitrary fixed limit on the total space that a method 2517// can use for converting the strings. (256 chars per string in the signature). 2518// So any java string larger then this is truncated. 
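// reg64_to_VMRegPair below widens a 64-bit register into a VMRegPair: on
// an LP64 build one register covers both halves (set2); on a 32-bit build
// the pair names the adjacent registers r and r->successor().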
2519 2520static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 }; 2521static bool offsets_initialized = false; 2522 2523static VMRegPair reg64_to_VMRegPair(Register r) { 2524 VMRegPair ret; 2525 if (wordSize == 8) { 2526 ret.set2(r->as_VMReg()); 2527 } else { 2528 ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg()); 2529 } 2530 return ret; 2531} 2532 2533 2534nmethod *SharedRuntime::generate_dtrace_nmethod( 2535 MacroAssembler *masm, methodHandle method) { 2536 2537 2538 // generate_dtrace_nmethod is guarded by a mutex so we are sure to 2539 // be single threaded in this method. 2540 assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be"); 2541 2542 // Fill in the signature array, for the calling-convention call. 2543 int total_args_passed = method->size_of_parameters(); 2544 2545 BasicType* in_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed); 2546 VMRegPair *in_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed); 2547 2548 // The signature we are going to use for the trap that dtrace will see 2549 // java/lang/String is converted. We drop "this" and any other object 2550 // is converted to NULL. (A one-slot java/lang/Long object reference 2551 // is converted to a two-slot long, which is why we double the allocation). 2552 BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2); 2553 VMRegPair* out_regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2); 2554 2555 int i=0; 2556 int total_strings = 0; 2557 int first_arg_to_pass = 0; 2558 int total_c_args = 0; 2559 2560 // Skip the receiver as dtrace doesn't want to see it 2561 if( !method->is_static() ) { 2562 in_sig_bt[i++] = T_OBJECT; 2563 first_arg_to_pass = 1; 2564 } 2565 2566 SignatureStream ss(method->signature()); 2567 for ( ; !ss.at_return_type(); ss.next()) { 2568 BasicType bt = ss.type(); 2569 in_sig_bt[i++] = bt; // Collect remaining bits of signature 2570 out_sig_bt[total_c_args++] = bt; 2571 if( bt == T_OBJECT) { 2572 symbolOop s = ss.as_symbol_or_null(); 2573 if (s == vmSymbols::java_lang_String()) { 2574 total_strings++; 2575 out_sig_bt[total_c_args-1] = T_ADDRESS; 2576 } else if (s == vmSymbols::java_lang_Boolean() || 2577 s == vmSymbols::java_lang_Byte()) { 2578 out_sig_bt[total_c_args-1] = T_BYTE; 2579 } else if (s == vmSymbols::java_lang_Character() || 2580 s == vmSymbols::java_lang_Short()) { 2581 out_sig_bt[total_c_args-1] = T_SHORT; 2582 } else if (s == vmSymbols::java_lang_Integer() || 2583 s == vmSymbols::java_lang_Float()) { 2584 out_sig_bt[total_c_args-1] = T_INT; 2585 } else if (s == vmSymbols::java_lang_Long() || 2586 s == vmSymbols::java_lang_Double()) { 2587 out_sig_bt[total_c_args-1] = T_LONG; 2588 out_sig_bt[total_c_args++] = T_VOID; 2589 } 2590 } else if ( bt == T_LONG || bt == T_DOUBLE ) { 2591 in_sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots 2592 // We convert double to long 2593 out_sig_bt[total_c_args-1] = T_LONG; 2594 out_sig_bt[total_c_args++] = T_VOID; 2595 } else if ( bt == T_FLOAT) { 2596 // We convert float to int 2597 out_sig_bt[total_c_args-1] = T_INT; 2598 } 2599 } 2600 2601 assert(i==total_args_passed, "validly parsed signature"); 2602 2603 // Now get the compiled-Java layout as input arguments 2604 int comp_args_on_stack; 2605 comp_args_on_stack = SharedRuntime::java_calling_convention( 2606 in_sig_bt, in_regs, total_args_passed, false); 2607 2608 // We have received a description of where all the java arg are located 2609 // on entry to the wrapper. 
We need to convert these args to where 2610 // a native (non-jni) function would expect them. To figure out 2611 // where they go we convert the java signature to a C signature and remove 2612 // T_VOID for any long/double we might have received. 2613 2614 2615 // Now figure out where the args must be stored and how much stack space 2616 // they require (neglecting out_preserve_stack_slots but including space 2617 // for storing the 1st six register arguments). It's weird; see int_stk_helper. 2618 // 2619 int out_arg_slots; 2620 out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args); 2621 2622 // Calculate the total number of stack slots we will need. 2623 2624 // First count the abi requirement plus all of the outgoing args 2625 int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots; 2626 2627 // Plus a temp for possible conversion of float/double/long register args 2628 2629 int conversion_temp = stack_slots; 2630 stack_slots += 2; 2631 2632 2633 // Now space for the string(s) we must convert 2634 2635 int string_locs = stack_slots; 2636 stack_slots += total_strings * 2637 (max_dtrace_string_size / VMRegImpl::stack_slot_size); 2638 2639 // Ok. The space we have allocated will look like: 2640 // 2641 // 2642 // FP-> | | 2643 // |---------------------| 2644 // | string[n] | 2645 // |---------------------| <- string_locs[n] 2646 // | string[n-1] | 2647 // |---------------------| <- string_locs[n-1] 2648 // | ... | 2649 // | ... | 2650 // |---------------------| <- string_locs[1] 2651 // | string[0] | 2652 // |---------------------| <- string_locs[0] 2653 // | temp | 2654 // |---------------------| <- conversion_temp 2655 // | outbound memory | 2656 // | based arguments | 2657 // | | 2658 // |---------------------| 2659 // | | 2660 // SP-> | out_preserved_slots | 2661 // 2662 // 2663 2664 // Now compute actual number of stack words we need rounding to make 2665 // stack properly aligned. 2666 stack_slots = round_to(stack_slots, 4 * VMRegImpl::slots_per_word); 2667 2668 int stack_size = stack_slots * VMRegImpl::stack_slot_size; 2669 2670 intptr_t start = (intptr_t)__ pc(); 2671 2672 // First thing make an ic check to see if we should even be here 2673 2674 { 2675 Label L; 2676 const Register temp_reg = G3_scratch; 2677 Address ic_miss(temp_reg, SharedRuntime::get_ic_miss_stub()); 2678 __ verify_oop(O0); 2679 __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), temp_reg); 2680 __ cmp(temp_reg, G5_inline_cache_reg); 2681 __ brx(Assembler::equal, true, Assembler::pt, L); 2682 __ delayed()->nop(); 2683 2684 __ jump_to(ic_miss, 0); 2685 __ delayed()->nop(); 2686 __ align(CodeEntryAlignment); 2687 __ bind(L); 2688 } 2689 2690 int vep_offset = ((intptr_t)__ pc()) - start; 2691 2692 2693 // The instruction at the verified entry point must be 5 bytes or longer 2694 // because it can be patched on the fly by make_non_entrant. The stack bang 2695 // instruction fits that requirement. 2696 2697 // Generate stack overflow check before creating frame 2698 __ generate_stack_overflow_check(stack_size); 2699 2700 assert(((intptr_t)__ pc() - start - vep_offset) >= 5, 2701 "valid size for make_non_entrant"); 2702 2703 // Generate a new frame for the wrapper. 2704 __ save(SP, -stack_size, SP); 2705 2706 // Frame is now completed as far as size and linkage.
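  // frame_complete records the code offset at which the wrapper's frame is
  // fully set up; pcs before that offset are treated as not yet having a
  // complete frame.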
2707 2708 int frame_complete = ((intptr_t)__ pc()) - start; 2709 2710#ifdef ASSERT 2711 bool reg_destroyed[RegisterImpl::number_of_registers]; 2712 bool freg_destroyed[FloatRegisterImpl::number_of_registers]; 2713 for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) { 2714 reg_destroyed[r] = false; 2715 } 2716 for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) { 2717 freg_destroyed[f] = false; 2718 } 2719 2720#endif /* ASSERT */ 2721 2722 VMRegPair zero; 2723 const Register g0 = G0; // without this we get a compiler warning (why??) 2724 zero.set2(g0->as_VMReg()); 2725 2726 int c_arg, j_arg; 2727 2728 Register conversion_off = noreg; 2729 2730 for (j_arg = first_arg_to_pass, c_arg = 0 ; 2731 j_arg < total_args_passed ; j_arg++, c_arg++ ) { 2732 2733 VMRegPair src = in_regs[j_arg]; 2734 VMRegPair dst = out_regs[c_arg]; 2735 2736#ifdef ASSERT 2737 if (src.first()->is_Register()) { 2738 assert(!reg_destroyed[src.first()->as_Register()->encoding()], "ack!"); 2739 } else if (src.first()->is_FloatRegister()) { 2740 assert(!freg_destroyed[src.first()->as_FloatRegister()->encoding( 2741 FloatRegisterImpl::S)], "ack!"); 2742 } 2743 if (dst.first()->is_Register()) { 2744 reg_destroyed[dst.first()->as_Register()->encoding()] = true; 2745 } else if (dst.first()->is_FloatRegister()) { 2746 freg_destroyed[dst.first()->as_FloatRegister()->encoding( 2747 FloatRegisterImpl::S)] = true; 2748 } 2749#endif /* ASSERT */ 2750 2751 switch (in_sig_bt[j_arg]) { 2752 case T_ARRAY: 2753 case T_OBJECT: 2754 { 2755 if (out_sig_bt[c_arg] == T_BYTE || out_sig_bt[c_arg] == T_SHORT || 2756 out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) { 2757 // need to unbox a one-slot value 2758 Register in_reg = L0; 2759 Register tmp = L2; 2760 if ( src.first()->is_reg() ) { 2761 in_reg = src.first()->as_Register(); 2762 } else { 2763 assert(Assembler::is_simm13(reg2offset(src.first()) + STACK_BIAS), 2764 "must be"); 2765 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, in_reg); 2766 } 2767 // If the final destination is an acceptable register 2768 if ( dst.first()->is_reg() ) { 2769 if ( dst.is_single_phys_reg() || out_sig_bt[c_arg] != T_LONG ) { 2770 tmp = dst.first()->as_Register(); 2771 } 2772 } 2773 2774 Label skipUnbox; 2775 if ( wordSize == 4 && out_sig_bt[c_arg] == T_LONG ) { 2776 __ mov(G0, tmp->successor()); 2777 } 2778 __ br_null(in_reg, true, Assembler::pn, skipUnbox); 2779 __ delayed()->mov(G0, tmp); 2780 2781 BasicType bt = out_sig_bt[c_arg]; 2782 int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt); 2783 switch (bt) { 2784 case T_BYTE: 2785 __ ldub(in_reg, box_offset, tmp); break; 2786 case T_SHORT: 2787 __ lduh(in_reg, box_offset, tmp); break; 2788 case T_INT: 2789 __ ld(in_reg, box_offset, tmp); break; 2790 case T_LONG: 2791 __ ld_long(in_reg, box_offset, tmp); break; 2792 default: ShouldNotReachHere(); 2793 } 2794 2795 __ bind(skipUnbox); 2796 // If tmp wasn't final destination copy to final destination 2797 if (tmp == L2) { 2798 VMRegPair tmp_as_VM = reg64_to_VMRegPair(L2); 2799 if (out_sig_bt[c_arg] == T_LONG) { 2800 long_move(masm, tmp_as_VM, dst); 2801 } else { 2802 move32_64(masm, tmp_as_VM, out_regs[c_arg]); 2803 } 2804 } 2805 if (out_sig_bt[c_arg] == T_LONG) { 2806 assert(out_sig_bt[c_arg+1] == T_VOID, "must be"); 2807 ++c_arg; // move over the T_VOID to keep the loop indices in sync 2808 } 2809 } else if (out_sig_bt[c_arg] == T_ADDRESS) { 2810 Register s = 2811 src.first()->is_reg() ? 
src.first()->as_Register() : L2; 2812 Register d = 2813 dst.first()->is_reg() ? dst.first()->as_Register() : L2; 2814 2815 // We store the oop now so that the conversion pass can reach 2816 // while in the inner frame. This will be the only store if 2817 // the oop is NULL. 2818 if (s != L2) { 2819 // src is register 2820 if (d != L2) { 2821 // dst is register 2822 __ mov(s, d); 2823 } else { 2824 assert(Assembler::is_simm13(reg2offset(dst.first()) + 2825 STACK_BIAS), "must be"); 2826 __ st_ptr(s, SP, reg2offset(dst.first()) + STACK_BIAS); 2827 } 2828 } else { 2829 // src not a register 2830 assert(Assembler::is_simm13(reg2offset(src.first()) + 2831 STACK_BIAS), "must be"); 2832 __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, d); 2833 if (d == L2) { 2834 assert(Assembler::is_simm13(reg2offset(dst.first()) + 2835 STACK_BIAS), "must be"); 2836 __ st_ptr(d, SP, reg2offset(dst.first()) + STACK_BIAS); 2837 } 2838 } 2839 } else if (out_sig_bt[c_arg] != T_VOID) { 2840 // Convert the arg to NULL 2841 if (dst.first()->is_reg()) { 2842 __ mov(G0, dst.first()->as_Register()); 2843 } else { 2844 assert(Assembler::is_simm13(reg2offset(dst.first()) + 2845 STACK_BIAS), "must be"); 2846 __ st_ptr(G0, SP, reg2offset(dst.first()) + STACK_BIAS); 2847 } 2848 } 2849 } 2850 break; 2851 case T_VOID: 2852 break; 2853 2854 case T_FLOAT: 2855 if (src.first()->is_stack()) { 2856 // Stack to stack/reg is simple 2857 move32_64(masm, src, dst); 2858 } else { 2859 if (dst.first()->is_reg()) { 2860 // freg -> reg 2861 int off = 2862 STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size; 2863 Register d = dst.first()->as_Register(); 2864 if (Assembler::is_simm13(off)) { 2865 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2866 SP, off); 2867 __ ld(SP, off, d); 2868 } else { 2869 if (conversion_off == noreg) { 2870 __ set(off, L6); 2871 conversion_off = L6; 2872 } 2873 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2874 SP, conversion_off); 2875 __ ld(SP, conversion_off , d); 2876 } 2877 } else { 2878 // freg -> mem 2879 int off = STACK_BIAS + reg2offset(dst.first()); 2880 if (Assembler::is_simm13(off)) { 2881 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2882 SP, off); 2883 } else { 2884 if (conversion_off == noreg) { 2885 __ set(off, L6); 2886 conversion_off = L6; 2887 } 2888 __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), 2889 SP, conversion_off); 2890 } 2891 } 2892 } 2893 break; 2894 2895 case T_DOUBLE: 2896 assert( j_arg + 1 < total_args_passed && 2897 in_sig_bt[j_arg + 1] == T_VOID && 2898 out_sig_bt[c_arg+1] == T_VOID, "bad arg list"); 2899 if (src.first()->is_stack()) { 2900 // Stack to stack/reg is simple 2901 long_move(masm, src, dst); 2902 } else { 2903 Register d = dst.first()->is_reg() ? dst.first()->as_Register() : L2; 2904 2905 // Destination could be an odd reg on 32bit in which case 2906 // we can't load direct to the destination. 
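          // (On a 32-bit build ld_long uses ldd, which writes an even/odd
          // register pair, so the destination must be an even-numbered
          // register; an odd destination is staged through L2 and split
          // into place afterwards.)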
2907 2908 if (!d->is_even() && wordSize == 4) { 2909 d = L2; 2910 } 2911 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size; 2912 if (Assembler::is_simm13(off)) { 2913 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), 2914 SP, off); 2915 __ ld_long(SP, off, d); 2916 } else { 2917 if (conversion_off == noreg) { 2918 __ set(off, L6); 2919 conversion_off = L6; 2920 } 2921 __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), 2922 SP, conversion_off); 2923 __ ld_long(SP, conversion_off, d); 2924 } 2925 if (d == L2) { 2926 long_move(masm, reg64_to_VMRegPair(L2), dst); 2927 } 2928 } 2929 break; 2930 2931 case T_LONG : 2932 // 32bit can't do a split move of something like g1 -> O0, O1 2933 // so use a memory temp 2934 if (src.is_single_phys_reg() && wordSize == 4) { 2935 Register tmp = L2; 2936 if (dst.first()->is_reg() && 2937 (wordSize == 8 || dst.first()->as_Register()->is_even())) { 2938 tmp = dst.first()->as_Register(); 2939 } 2940 2941 int off = STACK_BIAS + conversion_temp * VMRegImpl::stack_slot_size; 2942 if (Assembler::is_simm13(off)) { 2943 __ stx(src.first()->as_Register(), SP, off); 2944 __ ld_long(SP, off, tmp); 2945 } else { 2946 if (conversion_off == noreg) { 2947 __ set(off, L6); 2948 conversion_off = L6; 2949 } 2950 __ stx(src.first()->as_Register(), SP, conversion_off); 2951 __ ld_long(SP, conversion_off, tmp); 2952 } 2953 2954 if (tmp == L2) { 2955 long_move(masm, reg64_to_VMRegPair(L2), dst); 2956 } 2957 } else { 2958 long_move(masm, src, dst); 2959 } 2960 break; 2961 2962 case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); 2963 2964 default: 2965 move32_64(masm, src, dst); 2966 } 2967 } 2968 2969 2970 // If we have any strings we must store any register based arg to the stack 2971 // This includes any still live xmm registers too. 2972 2973 if (total_strings > 0 ) { 2974 2975 // protect all the arg registers 2976 __ save_frame(0); 2977 __ mov(G2_thread, L7_thread_cache); 2978 const Register L2_string_off = L2; 2979 2980 // Get first string offset 2981 __ set(string_locs * VMRegImpl::stack_slot_size, L2_string_off); 2982 2983 for (c_arg = 0 ; c_arg < total_c_args ; c_arg++ ) { 2984 if (out_sig_bt[c_arg] == T_ADDRESS) { 2985 2986 VMRegPair dst = out_regs[c_arg]; 2987 const Register d = dst.first()->is_reg() ? 2988 dst.first()->as_Register()->after_save() : noreg; 2989 2990 // It's a string the oop and it was already copied to the out arg 2991 // position 2992 if (d != noreg) { 2993 __ mov(d, O0); 2994 } else { 2995 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS), 2996 "must be"); 2997 __ ld_ptr(FP, reg2offset(dst.first()) + STACK_BIAS, O0); 2998 } 2999 Label skip; 3000 3001 __ br_null(O0, false, Assembler::pn, skip); 3002 __ delayed()->add(FP, L2_string_off, O1); 3003 3004 if (d != noreg) { 3005 __ mov(O1, d); 3006 } else { 3007 assert(Assembler::is_simm13(reg2offset(dst.first()) + STACK_BIAS), 3008 "must be"); 3009 __ st_ptr(O1, FP, reg2offset(dst.first()) + STACK_BIAS); 3010 } 3011 3012 __ call(CAST_FROM_FN_PTR(address, SharedRuntime::get_utf), 3013 relocInfo::runtime_call_type); 3014 __ delayed()->add(L2_string_off, max_dtrace_string_size, L2_string_off); 3015 3016 __ bind(skip); 3017 3018 } 3019 3020 } 3021 __ mov(L7_thread_cache, G2_thread); 3022 __ restore(); 3023 3024 } 3025 3026 3027 // Ok now we are done. 
Need to place the nop that dtrace wants in order to 3028 // patch in the trap 3029 3030 int patch_offset = ((intptr_t)__ pc()) - start; 3031 3032 __ nop(); 3033 3034 3035 // Return 3036 3037 __ ret(); 3038 __ delayed()->restore(); 3039 3040 __ flush(); 3041 3042 nmethod *nm = nmethod::new_dtrace_nmethod( 3043 method, masm->code(), vep_offset, patch_offset, frame_complete, 3044 stack_slots / VMRegImpl::slots_per_word); 3045 return nm; 3046 3047} 3048 3049#endif // HAVE_DTRACE_H 3050 3051// this function returns the adjust size (in number of words) to a c2i adapter 3052// activation for use during deoptimization 3053int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) { 3054 assert(callee_locals >= callee_parameters, 3055 "test and remove; got more parms than locals"); 3056 if (callee_locals < callee_parameters) 3057 return 0; // No adjustment for negative locals 3058 int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords(); 3059 return round_to(diff, WordsPerLong); 3060} 3061 3062// "Top of Stack" slots that may be unused by the calling convention but must 3063// otherwise be preserved. 3064// On Intel these are not necessary and the value can be zero. 3065// On Sparc this describes the words reserved for storing a register window 3066// when an interrupt occurs. 3067uint SharedRuntime::out_preserve_stack_slots() { 3068 return frame::register_save_words * VMRegImpl::slots_per_word; 3069} 3070 3071static void gen_new_frame(MacroAssembler* masm, bool deopt) { 3072// 3073// Common out the new frame generation for deopt and uncommon trap 3074// 3075 Register G3pcs = G3_scratch; // Array of new pcs (input) 3076 Register Oreturn0 = O0; 3077 Register Oreturn1 = O1; 3078 Register O2UnrollBlock = O2; 3079 Register O3array = O3; // Array of frame sizes (input) 3080 Register O4array_size = O4; // number of frames (input) 3081 Register O7frame_size = O7; // number of frames (input) 3082 3083 __ ld_ptr(O3array, 0, O7frame_size); 3084 __ sub(G0, O7frame_size, O7frame_size); 3085 __ save(SP, O7frame_size, SP); 3086 __ ld_ptr(G3pcs, 0, I7); // load frame's new pc 3087 3088 #ifdef ASSERT 3089 // make sure that the frames are aligned properly 3090#ifndef _LP64 3091 __ btst(wordSize*2-1, SP); 3092 __ breakpoint_trap(Assembler::notZero); 3093#endif 3094 #endif 3095 3096 // Deopt needs to pass some extra live values from frame to frame 3097 3098 if (deopt) { 3099 __ mov(Oreturn0->after_save(), Oreturn0); 3100 __ mov(Oreturn1->after_save(), Oreturn1); 3101 } 3102 3103 __ mov(O4array_size->after_save(), O4array_size); 3104 __ sub(O4array_size, 1, O4array_size); 3105 __ mov(O3array->after_save(), O3array); 3106 __ mov(O2UnrollBlock->after_save(), O2UnrollBlock); 3107 __ add(G3pcs, wordSize, G3pcs); // point to next pc value 3108 3109 #ifdef ASSERT 3110 // trash registers to show a clear pattern in backtraces 3111 __ set(0xDEAD0000, I0); 3112 __ add(I0, 2, I1); 3113 __ add(I0, 4, I2); 3114 __ add(I0, 6, I3); 3115 __ add(I0, 8, I4); 3116 // Don't touch I5 could have valuable savedSP 3117 __ set(0xDEADBEEF, L0); 3118 __ mov(L0, L1); 3119 __ mov(L0, L2); 3120 __ mov(L0, L3); 3121 __ mov(L0, L4); 3122 __ mov(L0, L5); 3123 3124 // trash the return value as there is nothing to return yet 3125 __ set(0xDEAD0001, O7); 3126 #endif 3127 3128 __ mov(SP, O5_savedSP); 3129} 3130 3131 3132static void make_new_frames(MacroAssembler* masm, bool deopt) { 3133 // 3134 // loop through the UnrollBlock info and create new frames 3135 // 3136 Register G3pcs = G3_scratch; 3137 Register 
static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
  Register        G3pcs              = G3_scratch;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3array            = O3;
  Register        O4array_size       = O4;
  Label loop;

  // Before we make new frames, check to see if stack is available.
  // Do this after the caller's return address is on top of stack
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(Address(O2UnrollBlock, 0,
         Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }

  __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()), O4array_size);
  __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()), G3pcs);

  __ ld_ptr(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()), O3array);

  // Adjust old interpreter frame to make space for new frame's extra java locals
  //
  // We capture the original sp for the transition frame only because it is needed in
  // order to properly calculate interpreter_sp_adjustment. Even though in real life
  // every interpreter frame captures a savedSP, it is only needed at the transition
  // (fortunately). If we had to have it correct everywhere then we would need to
  // be told the sp_adjustment for each frame we create. If the frame size array
  // were to have twice the frame count entries then we could have pairs
  // [sp_adjustment, frame_size] for each frame we create and keep up the
  // illusion everywhere.
  //

  __ ld(Address(O2UnrollBlock, 0, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()), O7);
  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  __ sub(SP, O7, SP);

#ifdef ASSERT
  // make sure that there is at least one entry in the array
  __ tst(O4array_size);
  __ breakpoint_trap(Assembler::zero);
#endif

  // Now push the new interpreter frames
  __ bind(loop);

  // allocate a new frame, filling the registers

  gen_new_frame(masm, deopt);        // allocate an interpreter frame

  __ tst(O4array_size);
  __ br(Assembler::notZero, false, Assembler::pn, loop);
  __ delayed()->add(O3array, wordSize, O3array);
  __ ld_ptr(G3pcs, 0, O7);           // load final frame new pc

}
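
// Worked example for last_frame_adjust() above (illustration only, assuming
// one word per stack element and WordsPerLong == 2, as on a 32-bit build):
// a callee with 2 parameters and 5 locals gives diff = (5 - 2) * 1 = 3 words,
// rounded up to 4 words. Scaled to bytes, such an adjustment feeds the SP
// adjustment that make_new_frames() above applies via caller_adjustment.
#if 0
static int last_frame_adjust_sketch(int callee_parameters, int callee_locals) {
  const int stack_element_words = 1;  // assumption; Interpreter::stackElementWords()
  const int words_per_long      = 2;  // assumption; WordsPerLong on 32-bit SPARC
  int diff = (callee_locals - callee_parameters) * stack_element_words;
  // equivalent to round_to(diff, words_per_long) for a power-of-2 alignment
  return (diff + words_per_long - 1) & ~(words_per_long - 1);
}
#endif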
//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
#ifdef _LP64
  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
#else
  // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("deopt_blob", 1600+pad, 512);
#endif /* _LP64 */
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  FloatRegister   Freturn0           = F0;
  Register        Greturn1           = G1;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3tmp              = O3;
  Register        I5exception_tmp    = I5;
  Register        G4exception_tmp    = G4_scratch;
  int             frame_size_words;
  Address         saved_Freturn0_addr(FP, 0, -sizeof(double) + STACK_BIAS);
#if !defined(_LP64) && defined(COMPILER2)
  Address         saved_Greturn1_addr(FP, 0, -sizeof(double) - sizeof(jlong) + STACK_BIAS);
#endif
  Label           cont;

  OopMapSet *oop_maps = new OopMapSet();

  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only the result values. Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push new interpreter frames (take care to propagate the return
  //     values through each new frame pushed)
  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - ensure that all the return values are correctly set and then do
  //     a return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::fetch_unroll_info
  //   - Deoptimization::unpack_frames

  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location) so the deoptimizing
  // pc is now in O7. Return values are still in the expected places

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(false, cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, I5exception_tmp);

  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or exception_blob
  // for server).  O0 contains the exception oop and O7 contains the original
  // exception pc.  So if we push a frame here it will look to the
  // stack walking code (fetch_unroll_info) just like a normal call so
  // state will be extracted normally.
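
  // Note: this blob ends up with four entry points. The main entry (start)
  // loads Unpack_deopt; the exception entry (exception_offset) stores the
  // incoming exception oop into TLS and falls through into the
  // exception_in_tls entry (exception_in_tls_offset, below), which loads
  // Unpack_exception; the reexecute entry (reexecute_offset, below) loads
  // Unpack_reexecute. All of them meet at 'cont', and the chosen mode is
  // eventually handed to Deoptimization::unpack_frames.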
  // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled in the same way except
  // for where the pending exception is kept.
  __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset()));

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
    __ br_notnull(Oexception, false, Assembler::pt, has_exception);
    __ delayed()->nop();
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, 0, in_bytes(Thread::pending_exception_offset()));
    __ ld_ptr(exception_addr, Oexception);
    __ br_null(Oexception, false, Assembler::pt, no_pending_exception);
    __ delayed()->nop();
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(false, cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, I5exception_tmp);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, I5exception_tmp);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site; it describes all our saved volatile registers

  oop_maps->add_gc_map( __ offset() - start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(I5exception_tmp, G4exception_tmp);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  Label noException;
  __ cmp(G4exception_tmp, Deoptimization::Unpack_exception);   // Was exception pending?
  __ br(Assembler::notEqual, false, Assembler::pt, noException);
  __ delayed()->nop();

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
  __ mov(Oreturn0,      Oreturn0->after_save());
  __ mov(Oreturn1,      Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
#if !defined(_LP64)
#if defined(COMPILER2)
  if (!TieredCompilation) {
    // 32-bit 1-register longs return longs in G1
    __ stx(Greturn1, saved_Greturn1_addr);
  }
#endif
  __ set_last_Java_frame(SP, noreg);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4exception_tmp);
#else
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4exception_tmp, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
#endif
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

  // In tiered we never use C2 to compile methods returning longs so
  // the result is where we expect it already.

#if !defined(_LP64) && defined(COMPILER2)
  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
  // I0/I1 if the return value is long.  In the tiered world there is
  // a mismatch between how C1 and C2 compile methods that return longs,
  // so compilation of such methods is currently disabled for C2 and so
  // is this code.  Eventually C1 and C2 will do the same thing for longs
  // in the tiered world.
  if (!TieredCompilation) {
    Label not_long;
    __ cmp(O0, T_LONG);
    __ br(Assembler::notEqual, false, Assembler::pt, not_long);
    __ delayed()->nop();
    __ ldd(saved_Greturn1_addr, I0);
    __ bind(not_long);
  }
#endif
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}
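
// The offsets recorded with '__ offset() - start' above are what turn this
// single code blob into multiple entry points: each entry address is simply
// the blob's code base plus the recorded offset. A minimal sketch
// (illustration only; the struct and function names are hypothetical, not
// HotSpot API):
#if 0
struct DeoptEntryOffsets {
  unsigned char* code_begin;     // base address of the generated blob code
  int exception_offset;          // entry used when an exception is in flight
  int reexecute_offset;          // entry used when the bytecode must be reexecuted
  int exception_in_tls_offset;   // entry used when the exception oop is already in TLS
};

static unsigned char* entry_at(const DeoptEntryOffsets* d, int offset) {
  return d->code_begin + offset;  // offset 0 is the vanilla Unpack_deopt entry
}
#endif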

#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
#ifdef _LP64
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
#else
  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
#endif
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  Register        O2UnrollBlock      = O2;
  Register        O3tmp              = O3;
  Register        O2klass_index      = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code.  The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution.
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames;
  //   - create a dummy "unpack_frame"
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3);        // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2
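
// Safepoint polls in compiled code boil down to a single load from a
// dedicated polling page: when the VM wants a safepoint it protects that
// page, the load faults, and the signal handler redirects execution to the
// blob generated below. A C-level sketch of the idea (illustration only,
// not part of this file; the real poll is emitted as SPARC assembly and the
// redirection lives in the platform signal handler):
#if 0
static volatile int* polling_page;   // hypothetical stand-in for the VM's polling page

static void safepoint_poll_sketch() {
  // compiled code effectively performs a dead load; it traps only when the
  // VM has protected the page to request a safepoint
  (void)*polling_page;
}
#endif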

//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code.  On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally.  Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build.  A simple 'save' turns the %o's into %i's
// and an interrupt will chop off their heads.  Making space in the caller's
// frame first will let us save the 64-bit %o's before save'ing, but we cannot
// hand the adjusted FP off to the GC stack-crawler: this will modify the
// caller's SP and mess up HIS OopMaps.  So we first adjust the caller's SP,
// then save the 64-bit %o's, then do a save, then fixup the caller's SP
// (our FP).  Tricky, tricky, tricky...

static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
  MacroAssembler* masm                = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to handle the safepoint poll.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ tst(O1);
  __ brx(Assembler::notEqual, true, Assembler::pn, pending);
  __ delayed()->nop();

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.
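  // retl returns through O7, resuming the interrupted nmethod at the
  // address established on entry to this blob.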
  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return exception blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  // even larger with TraceJumps
  int pad = TraceJumps ? 512 : 0;
  CodeBuffer buffer(name, 1600 + pad, 512);
  MacroAssembler* masm                = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to resolve the call site.
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to assuming no exception got installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ tst(O1);
  __ brx(Assembler::notEqual, true, Assembler::pn, pending);
  __ delayed()->nop();

  // get the returned methodOop

  __ get_vm_result(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump; overwrite G3, which is saved and scratch

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.
  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}

void SharedRuntime::generate_stubs() {

  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
                                             "wrong_method_stub");

  _ic_miss_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
                                        "ic_miss_stub");

  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
                                                         "resolve_opt_virtual_call");

  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
                                                     "resolve_virtual_call");

  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
                                                    "resolve_static_call");

  _polling_page_safepoint_handler_blob =
    generate_handler_blob(CAST_FROM_FN_PTR(address,
                   SafepointSynchronize::handle_polling_page_exception), false);

  _polling_page_return_handler_blob =
    generate_handler_blob(CAST_FROM_FN_PTR(address,
                   SafepointSynchronize::handle_polling_page_exception), true);

  generate_deopt_blob();

#ifdef COMPILER2
  generate_uncommon_trap_blob();
#endif // COMPILER2
}
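
// How a resolve blob is reached (illustration only; the names below are
// hypothetical, not HotSpot API): a not-yet-resolved compiled call site
// initially targets one of the blobs created above. The blob preserves all
// argument registers, calls the matching resolver (e.g.
// SharedRuntime::resolve_static_call_C) for the real destination, receives
// the methodOop via get_vm_result into G5, restores the arguments, and jumps
// to the resolved entry through G3. A C-level sketch of that control flow:
#if 0
typedef unsigned char* address_t;

static address_t resolve_stub_sketch(address_t (*resolver)(void* thread), void* thread) {
  // save_live_registers(...) happens here in the real stub
  address_t dest = resolver(thread);  // returned in O0 in the real stub
  // restore_live_registers(...) happens here; dest was parked in the saved
  // G3 slot so it survives the restore
  return dest;                        // real code: __ JMP(G3, 0)
}
#endif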