/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP

#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/codeBuffer.hpp"
#include "code/codeCache.hpp"

inline bool Address::is_simm13(int offset) { return Assembler::is_simm13(disp() + offset); }


inline int AddressLiteral::low10() const {
  return Assembler::low10(value());
}


inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
  jint& stub_inst = *(jint*) branch;
  stub_inst = patched_branch(target - branch, stub_inst, 0);
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  ld( s1, simm13a, d);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::ld_ptr( Register s1, ByteSize simm13a, Register d ) {
  ld_ptr(s1, in_bytes(simm13a), d);
}
#endif

inline void MacroAssembler::ld_ptr( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ld( s1, s2, d);
#endif
}

inline void MacroAssembler::ld_ptr(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ld( a, d, offset);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  st( d, s1, simm13a);
#endif
}

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st_ptr( Register d, Register s1, ByteSize simm13a ) {
  st_ptr(d, s1, in_bytes(simm13a));
}
#endif

inline void MacroAssembler::st_ptr( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  st( d, s1, s2);
#endif
}

inline void MacroAssembler::st_ptr(Register d, const Address& a, int offset) {
#ifdef _LP64
  stx(d, a, offset);
#else
  st( d, a, offset);
#endif
}

// Use the right loads/stores for the platform
inline void MacroAssembler::ld_long( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, s2, d);
#else
  Assembler::ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, int simm13a, Register d ) {
#ifdef _LP64
  Assembler::ldx(s1, simm13a, d);
#else
  Assembler::ldd(s1, simm13a, d);
#endif
}

inline void MacroAssembler::ld_long( Register s1, RegisterOrConstant s2, Register d ) {
#ifdef _LP64
  ldx(s1, s2, d);
#else
  ldd(s1, s2, d);
#endif
}

inline void MacroAssembler::ld_long(const Address& a, Register d, int offset) {
#ifdef _LP64
  ldx(a, d, offset);
#else
  ldd(a, d, offset);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, Register s2 ) {
#ifdef _LP64
  Assembler::stx(d, s1, s2);
#else
  Assembler::std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, int simm13a ) {
#ifdef _LP64
  Assembler::stx(d, s1, simm13a);
#else
  Assembler::std(d, s1, simm13a);
#endif
}

inline void MacroAssembler::st_long( Register d, Register s1, RegisterOrConstant s2 ) {
#ifdef _LP64
  stx(d, s1, s2);
#else
  std(d, s1, s2);
#endif
}

inline void MacroAssembler::st_long( Register d, const Address& a, int offset ) {
#ifdef _LP64
  stx(d, a, offset);
#else
  std(d, a, offset);
#endif
}

inline void MacroAssembler::stbool(Register d, const Address& a) { stb(d, a); }
inline void MacroAssembler::ldbool(const Address& a, Register d) { ldub(a, d); }
inline void MacroAssembler::movbool( bool boolconst, Register d) { mov( (int) boolconst, d); }

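// Note: on SPARC, %g0 always reads as zero, which is what the idioms below
// rely on: sra by %g0 sign-extends the low 32 bits into a 64-bit register
// (signx), xnor with %g0 is a bitwise NOT (not1), and subtraction from %g0
// is negation (neg).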
inline void MacroAssembler::signx( Register s, Register d ) { sra( s, G0, d); }
inline void MacroAssembler::signx( Register d )             { sra( d, G0, d); }

inline void MacroAssembler::not1( Register s, Register d ) { xnor( s, G0, d ); }
inline void MacroAssembler::not1( Register d )             { xnor( d, G0, d ); }

inline void MacroAssembler::neg( Register s, Register d ) { sub( G0, s, d ); }
inline void MacroAssembler::neg( Register d )             { sub( G0, d, d ); }

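// casa/casxa implement SPARC compare-and-swap: the word at [s1] is compared
// with s2 and, if the two are equal, exchanged with d; in either case d
// receives the previous memory contents.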
inline void MacroAssembler::cas(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
inline void MacroAssembler::casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }

// Functions for isolating 64 bit atomic swaps for LP64
// cas_ptr will perform cas for 32 bit VMs and casx for 64 bit VMs
inline void MacroAssembler::cas_ptr( Register s1, Register s2, Register d) {
#ifdef _LP64
  casx( s1, s2, d );
#else
  cas( s1, s2, d );
#endif
}

// Functions for isolating 64 bit shifts for LP64

inline void MacroAssembler::sll_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, s2, d);
#else
  Assembler::sll( s1, s2, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::sllx(s1, imm6a, d);
#else
  Assembler::sll( s1, imm6a, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, Register s2, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, s2, d);
#else
  Assembler::srl( s1, s2, d);
#endif
}

inline void MacroAssembler::srl_ptr( Register s1, int imm6a, Register d ) {
#ifdef _LP64
  Assembler::srlx(s1, imm6a, d);
#else
  Assembler::srl( s1, imm6a, d);
#endif
}

inline void MacroAssembler::sll_ptr( Register s1, RegisterOrConstant s2, Register d ) {
  if (s2.is_register())  sll_ptr(s1, s2.as_register(), d);
  else                   sll_ptr(s1, s2.as_constant(), d);
}

inline void MacroAssembler::casl(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
inline void MacroAssembler::casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }

inline void MacroAssembler::inc(   Register d, int const13 ) { add(   d, const13, d); }
inline void MacroAssembler::inccc( Register d, int const13 ) { addcc( d, const13, d); }

inline void MacroAssembler::dec(   Register d, int const13 ) { sub(   d, const13, d); }
inline void MacroAssembler::deccc( Register d, int const13 ) { subcc( d, const13, d); }

// Use the right branch for the platform

inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, icc, p, d, rt);
}

inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
  insert_nop_after_cbcond();
  br(c, a, p, target(L));
}


// Branch that tests either xcc or icc depending on the
// architecture compiled (LP64 or not)
inline void MacroAssembler::brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
#ifdef _LP64
  Assembler::bp(c, a, xcc, p, d, rt);
#else
  MacroAssembler::br(c, a, p, d, rt);
#endif
}

inline void MacroAssembler::brx( Condition c, bool a, Predict p, Label& L ) {
  insert_nop_after_cbcond();
  brx(c, a, p, target(L));
}

inline void MacroAssembler::ba( Label& L ) {
  br(always, false, pt, L);
}

// Warning: V9-only functions
inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::bp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::bp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::bp(c, a, cc, p, L);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
  fbp(c, a, fcc0, p, d, rt);
}

inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
  insert_nop_after_cbcond();
  fb(c, a, p, target(L));
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) {
  Assembler::fbp(c, a, cc, p, d, rt);
}

inline void MacroAssembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) {
  Assembler::fbp(c, a, cc, p, L);
}

inline void MacroAssembler::jmp( Register s1, Register s2 ) { jmpl( s1, s2, G0 ); }
inline void MacroAssembler::jmp( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, G0, rspec); }

inline bool MacroAssembler::is_far_target(address d) {
  if (ForceUnreachable) {
    // References outside the code cache should be treated as far
    return d < CodeCache::low_bound() || d > CodeCache::high_bound();
  }
  return !is_in_wdisp30_range(d, CodeCache::low_bound()) || !is_in_wdisp30_range(d, CodeCache::high_bound());
}

// Call with a check to see if we need to deal with the added
// expense of relocation and if we overflow the displacement
// of the quick call instruction.
inline void MacroAssembler::call( address d, relocInfo::relocType rt ) {
  MacroAssembler::call(d, Relocation::spec_simple(rt));
}

inline void MacroAssembler::call( address d, RelocationHolder const& rspec ) {
#ifdef _LP64
  intptr_t disp;
  // NULL is ok because it will be relocated later.
  // Must change NULL to a reachable address in order to
  // pass asserts here and in wdisp.
  if ( d == NULL )
    d = pc();

  // Is this address within range of the call instruction?
  // If not, use the expensive instruction sequence
  if (is_far_target(d)) {
    relocate(rspec);
    AddressLiteral dest(d);
    jumpl_to(dest, O7, O7);
  } else {
    Assembler::call(d, rspec);
  }
#else
  Assembler::call( d, rspec );
#endif
}

inline void MacroAssembler::call( Label& L, relocInfo::relocType rt ) {
  insert_nop_after_cbcond();
  MacroAssembler::call( target(L), rt);
}


inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }

// prefetch instruction
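// Note: a branch-never with the annul bit set is defined by the SPARC V9
// architecture as an instruction-prefetch hint for its target rather than a
// transfer of control, which is what makes the encoding below a prefetch.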
inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
  Assembler::bp( never, true, xcc, pt, d, rt );
  Assembler::bp( never, true, xcc, pt, d, rt );
}
inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }

inline void MacroAssembler::tst( Register s ) { orcc( G0, s, G0 ); }

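// ret returns through I7 (the return address as seen inside the callee's
// register window), retl through O7; the +8 (2 * BytesPerInstWord) skips
// the call instruction and its delay slot.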
inline void MacroAssembler::ret( bool trace ) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl( I7, 2 * BytesPerInstWord, G0 );
  }
}

inline void MacroAssembler::retl( bool trace ) {
  if (trace) {
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl( O7, 2 * BytesPerInstWord, G0 );
  }
}

// Returns the delta in bytes from the PC read into d to the address
// immediately following the emitted code.
inline int MacroAssembler::get_pc( Register d ) {
  int x = offset();
  rdpc(d);
  return offset() - x;
}

inline void MacroAssembler::cmp( Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
inline void MacroAssembler::cmp( Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }

// Note: All MacroAssembler::set_foo functions are defined out-of-line.


// Loads the current PC of the following instruction as an immediate value in
// 2 instructions.  All PCs in the CodeCache are within 2 Gig of each other.
inline intptr_t MacroAssembler::load_pc_address( Register reg, int bytes_to_skip ) {
  intptr_t thepc = (intptr_t)pc() + 2*BytesPerInstWord + bytes_to_skip;
#ifdef _LP64
  Unimplemented();
#else
  Assembler::sethi(thepc & ~0x3ff, reg, internal_word_Relocation::spec((address)thepc));
  add(reg, thepc & 0x3ff, reg, internal_word_Relocation::spec((address)thepc));
#endif
  return thepc;
}

inline void MacroAssembler::load_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_bool_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ldub(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, d);
  } else {
    sethi(addrlit, d);
  }
  ld_ptr(d, addrlit.low10() + offset, d);
}


inline void MacroAssembler::store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st(s, temp, addrlit.low10() + offset);
}


inline void MacroAssembler::store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset) {
  assert_not_delayed();
  if (ForceUnreachable) {
    patchable_sethi(addrlit, temp);
  } else {
    sethi(addrlit, temp);
  }
  st_ptr(s, temp, addrlit.low10() + offset);
}


// This code sequence is relocatable to any address, even on LP64.
inline void MacroAssembler::jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  jmpl(temp, addrlit.low10() + offset, d);
}


inline void MacroAssembler::jump_to(const AddressLiteral& addrlit, Register temp, int offset) {
  jumpl_to(addrlit, temp, G0, offset);
}


inline void MacroAssembler::jump_indirect_to(Address& a, Register temp,
                                             int ld_offset, int jmp_offset) {
  assert_not_delayed();
  // sethi(al);  // sethi is caller's responsibility for this one
  ld_ptr(a, temp, ld_offset);
  jmp(temp, jmp_offset);
}


inline void MacroAssembler::set_metadata(Metadata* obj, Register d) {
  set_metadata(allocate_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata_constant(Metadata* obj, Register d) {
  set_metadata(constant_metadata_address(obj), d);
}

inline void MacroAssembler::set_metadata(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  set(obj_addr, d);
}

inline void MacroAssembler::set_oop(jobject obj, Register d) {
  set_oop(allocate_oop_address(obj), d);
}


inline void MacroAssembler::set_oop_constant(jobject obj, Register d) {
  set_oop(constant_oop_address(obj), d);
}


inline void MacroAssembler::set_oop(const AddressLiteral& obj_addr, Register d) {
  assert(obj_addr.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
  set(obj_addr, d);
}


inline void MacroAssembler::load_argument( Argument& a, Register d ) {
  if (a.is_register())
    mov(a.as_register(), d);
  else
    ld (a.as_address(), d);
}

inline void MacroAssembler::store_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());  // ABI says everything is right justified.
}

inline void MacroAssembler::store_ptr_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    st_ptr (s, a.as_address());
}


#ifdef _LP64
inline void MacroAssembler::store_float_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes float arguments in F1, F3, F5, ... instead of O0, O1, O2, ...
    fmov(FloatRegisterImpl::S, s, a.as_float_register() );
  else
    // Floats are stored in the high half of the stack entry;
    // the low half is undefined per the ABI.
    stf(FloatRegisterImpl::S, s, a.as_address(), sizeof(jfloat));
}

inline void MacroAssembler::store_double_argument( FloatRegister s, Argument& a ) {
  if (a.is_float_register())
    // The V9 ABI passes double arguments in D0, D2, D4, ... instead of O0, O1, O2, ...
    fmov(FloatRegisterImpl::D, s, a.as_double_register() );
  else
    stf(FloatRegisterImpl::D, s, a.as_address());
}

inline void MacroAssembler::store_long_argument( Register s, Argument& a ) {
  if (a.is_register())
    mov(s, a.as_register());
  else
    stx(s, a.as_address());
}
#endif

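// Rounds r up to a multiple of modulus, which must be a power of two
// (add modulus - 1, then mask off the low-order bits).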
inline void MacroAssembler::round_to( Register r, int modulus ) {
  assert_not_delayed();
  inc( r, modulus - 1 );
  and3( r, -modulus, r );
}

inline void MacroAssembler::add(Register s1, int simm13a, Register d, relocInfo::relocType rtype) {
  relocate(rtype);
  add(s1, simm13a, d);
}
inline void MacroAssembler::add(Register s1, int simm13a, Register d, RelocationHolder const& rspec) {
  relocate(rspec);
  add(s1, simm13a, d);
}

// form effective addresses this way:
inline void MacroAssembler::add(const Address& a, Register d, int offset) {
  if (a.has_index())   add(a.base(), a.index(),         d);
  else               { add(a.base(), a.disp() + offset, d, a.rspec(offset)); offset = 0; }
  if (offset != 0)     add(d,        offset,            d);
}
inline void MacroAssembler::add(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  add(s1, s2.as_register(),          d);
  else                 { add(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       add(d,  offset,                    d);
}

inline void MacroAssembler::andn(Register s1, RegisterOrConstant s2, Register d) {
  if (s2.is_register())  andn(s1, s2.as_register(), d);
  else                   andn(s1, s2.as_constant(), d);
}

inline void MacroAssembler::btst( Register s1, Register s2 ) { andcc( s1, s2, G0 ); }
inline void MacroAssembler::btst( int simm13a, Register s )  { andcc( s, simm13a, G0 ); }

inline void MacroAssembler::bset( Register s1, Register s2 ) { or3( s1, s2, s2 ); }
inline void MacroAssembler::bset( int simm13a, Register s )  { or3( s, simm13a, s ); }

inline void MacroAssembler::bclr( Register s1, Register s2 ) { andn( s1, s2, s2 ); }
inline void MacroAssembler::bclr( int simm13a, Register s )  { andn( s, simm13a, s ); }

inline void MacroAssembler::btog( Register s1, Register s2 ) { xor3( s1, s2, s2 ); }
inline void MacroAssembler::btog( int simm13a, Register s )  { xor3( s, simm13a, s ); }

inline void MacroAssembler::clr( Register d ) { or3( G0, G0, d ); }

inline void MacroAssembler::clrb( Register s1, Register s2) { stb( G0, s1, s2 ); }
inline void MacroAssembler::clrh( Register s1, Register s2) { sth( G0, s1, s2 ); }
inline void MacroAssembler::clr(  Register s1, Register s2) { stw( G0, s1, s2 ); }
inline void MacroAssembler::clrx( Register s1, Register s2) { stx( G0, s1, s2 ); }

inline void MacroAssembler::clrb( Register s1, int simm13a) { stb( G0, s1, simm13a); }
inline void MacroAssembler::clrh( Register s1, int simm13a) { sth( G0, s1, simm13a); }
inline void MacroAssembler::clr(  Register s1, int simm13a) { stw( G0, s1, simm13a); }
inline void MacroAssembler::clrx( Register s1, int simm13a) { stx( G0, s1, simm13a); }

inline void MacroAssembler::clruw( Register s, Register d ) { srl( s, G0, d); }
inline void MacroAssembler::clruwu( Register d )            { srl( d, G0, d); }

#ifdef _LP64
// Make all 32 bit loads signed so 64 bit registers maintain proper sign
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { ldsw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { ldsw( s1, simm13a, d); }
#else
inline void MacroAssembler::ld( Register s1, Register s2, Register d) { lduw( s1, s2, d); }
inline void MacroAssembler::ld( Register s1, int simm13a, Register d) { lduw( s1, simm13a, d); }
#endif

#ifdef ASSERT
  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
# ifdef _LP64
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { ldsw( s1, in_bytes(simm13a), d); }
# else
inline void MacroAssembler::ld(Register s1, ByteSize simm13a, Register d) { lduw( s1, in_bytes(simm13a), d); }
# endif
#endif

inline void MacroAssembler::ld( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ld( a.base(), a.index(),         d); }
  else               {                          ld( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldsb(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsb(a.base(), a.index(),         d); }
  else               {                          ldsb(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsh(a.base(), a.index(),         d); }
  else               {                          ldsh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldsw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldsw(a.base(), a.index(),         d); }
  else               {                          ldsw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldub(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldub(a.base(), a.index(),         d); }
  else               {                          ldub(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduh(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduh(a.base(), a.index(),         d); }
  else               {                          lduh(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::lduw(const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); lduw(a.base(), a.index(),         d); }
  else               {                          lduw(a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldd( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldd( a.base(), a.index(),         d); }
  else               {                          ldd( a.base(), a.disp() + offset, d); }
}
inline void MacroAssembler::ldx( const Address& a, Register d, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); ldx( a.base(), a.index(),         d); }
  else               {                          ldx( a.base(), a.disp() + offset, d); }
}

inline void MacroAssembler::ldub(Register s1, RegisterOrConstant s2, Register d) { ldub(Address(s1, s2), d); }
inline void MacroAssembler::ldsb(Register s1, RegisterOrConstant s2, Register d) { ldsb(Address(s1, s2), d); }
inline void MacroAssembler::lduh(Register s1, RegisterOrConstant s2, Register d) { lduh(Address(s1, s2), d); }
inline void MacroAssembler::ldsh(Register s1, RegisterOrConstant s2, Register d) { ldsh(Address(s1, s2), d); }
inline void MacroAssembler::lduw(Register s1, RegisterOrConstant s2, Register d) { lduw(Address(s1, s2), d); }
inline void MacroAssembler::ldsw(Register s1, RegisterOrConstant s2, Register d) { ldsw(Address(s1, s2), d); }
inline void MacroAssembler::ldx( Register s1, RegisterOrConstant s2, Register d) { ldx( Address(s1, s2), d); }
inline void MacroAssembler::ld(  Register s1, RegisterOrConstant s2, Register d) { ld(  Address(s1, s2), d); }
inline void MacroAssembler::ldd( Register s1, RegisterOrConstant s2, Register d) { ldd( Address(s1, s2), d); }

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d) {
  if (s2.is_register())  ldf(w, s1, s2.as_register(), d);
  else                   ldf(w, s1, s2.as_constant(), d);
}

inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) {
    assert(offset == 0, "");
    ldf(w, a.base(), a.index(), d);
  } else {
    ldf(w, a.base(), a.disp() + offset, d);
  }
}

inline void MacroAssembler::lduwl(Register s1, Register s2, Register d) { lduwa(s1, s2, ASI_PRIMARY_LITTLE, d); }
inline void MacroAssembler::ldswl(Register s1, Register s2, Register d) { ldswa(s1, s2, ASI_PRIMARY_LITTLE, d); }
inline void MacroAssembler::ldxl( Register s1, Register s2, Register d) { ldxa(s1, s2, ASI_PRIMARY_LITTLE, d); }
inline void MacroAssembler::ldfl(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { ldfa(w, s1, s2, ASI_PRIMARY_LITTLE, d); }

// Returns whether this membar would emit anything; this code must mirror
// membar() below.
inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
  if (!os::is_MP())
    return false;  // Not needed on single CPU
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  return (effective_mask != 0);
}

inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
  // Uniprocessors do not need memory barriers
  if (!os::is_MP())
    return;
  // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
  // 8.4.4.3, a.31 and a.50.
  // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
  // of the mmask subfield of const7a that does anything that isn't done
  // implicitly is StoreLoad.
  const Membar_mask_bits effective_mask =
      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  if (effective_mask != 0) {
    Assembler::membar(effective_mask);
  }
}

inline void MacroAssembler::mov(Register s, Register d) {
  if (s != d) {
    or3(G0, s, d);
  } else {
    assert_not_delayed();  // Put something useful in the delay slot!
  }
}

inline void MacroAssembler::mov_or_nop(Register s, Register d) {
  if (s != d) {
    or3(G0, s, d);
  } else {
    nop();
  }
}

inline void MacroAssembler::mov( int simm13a, Register d) { or3( G0, simm13a, d); }

inline void MacroAssembler::prefetch(const Address& a, PrefetchFcn f, int offset) {
  relocate(a.rspec(offset));
  assert(!a.has_index(), "");
  prefetch(a.base(), a.disp() + offset, f);
}

inline void MacroAssembler::st(Register d, Register s1, Register s2) { stw(d, s1, s2); }
inline void MacroAssembler::st(Register d, Register s1, int simm13a) { stw(d, s1, simm13a); }

#ifdef ASSERT
// ByteSize is only a class when ASSERT is defined, otherwise it's an int.
inline void MacroAssembler::st(Register d, Register s1, ByteSize simm13a) { stw(d, s1, in_bytes(simm13a)); }
#endif

inline void MacroAssembler::st(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); st( d, a.base(), a.index()        ); }
  else               {                          st( d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stb(d, a.base(), a.index()        ); }
  else               {                          stb(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::sth(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); sth(d, a.base(), a.index()        ); }
  else               {                          sth(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stw(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stw(d, a.base(), a.index()        ); }
  else               {                          stw(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::std(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); std(d, a.base(), a.index()        ); }
  else               {                          std(d, a.base(), a.disp() + offset); }
}
inline void MacroAssembler::stx(Register d, const Address& a, int offset) {
  if (a.has_index()) { assert(offset == 0, ""); stx(d, a.base(), a.index()        ); }
  else               {                          stx(d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::stb(Register d, Register s1, RegisterOrConstant s2) { stb(d, Address(s1, s2)); }
inline void MacroAssembler::sth(Register d, Register s1, RegisterOrConstant s2) { sth(d, Address(s1, s2)); }
inline void MacroAssembler::stw(Register d, Register s1, RegisterOrConstant s2) { stw(d, Address(s1, s2)); }
inline void MacroAssembler::stx(Register d, Register s1, RegisterOrConstant s2) { stx(d, Address(s1, s2)); }
inline void MacroAssembler::std(Register d, Register s1, RegisterOrConstant s2) { std(d, Address(s1, s2)); }
inline void MacroAssembler::st( Register d, Register s1, RegisterOrConstant s2) { st( d, Address(s1, s2)); }

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2) {
  if (s2.is_register())  stf(w, d, s1, s2.as_register());
  else                   stf(w, d, s1, s2.as_constant());
}

inline void MacroAssembler::stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); stf(w, d, a.base(), a.index()        ); }
  else               {                          stf(w, d, a.base(), a.disp() + offset); }
}

inline void MacroAssembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
  if (s2.is_register())  sub(s1, s2.as_register(),          d);
  else                 { sub(s1, s2.as_constant() + offset, d); offset = 0; }
  if (offset != 0)       sub(d,  offset,                    d);
}

inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
  relocate(a.rspec(offset));
  if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d        ); }
  else               {                          swap(a.base(), a.disp() + offset, d); }
}

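// On 64-bit SPARC the stack pointer is biased by STACK_BIAS, so the bias is
// added back here to form the true address of the word being probed.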
inline void MacroAssembler::bang_stack_with_offset(int offset) {
  // stack grows down, caller passes positive offset
  assert(offset > 0, "must bang with negative offset");
  set((-offset)+STACK_BIAS, G3_scratch);
  st(G0, SP, G3_scratch);
}

#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP