Lines Matching defs:to

16  * 2 along with this work; if not, write to the Free Software Foundation,
84 // op codes such as ld or ldx, only access disp() to get
107 // On RISC, there's no benefit to verifying instruction boundaries.
111 // Patch instruction inst at offset inst_pos to refer to dest_pos
179 // nothing to do, (later) access of M[reg + offset]
196 // This code sequence is relocatable to any address, even on LP64.
202 Address a(temp, addrlit.low10() + offset); // Add the offset to the displacement.
216 // We want to use ST_BREAKPOINT here, but the debugger is confused by it.
222 // We use the current thread pointer to calculate a thread specific
223 // offset to write to within the page. This minimizes bus traffic
224 // due to cache line collision.
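The three comments above (source lines 222-224) describe the memory-serialization page trick. A minimal C++ sketch of the idea, assuming a hypothetical page array and offset hash (none of these names come from the file):

    #include <cstdint>
    #include <cstddef>

    // Each thread stores to its own word within a shared page, so concurrent
    // serializing writes do not collide on a single cache line.
    static volatile int serialize_page[4096 / sizeof(int)];

    inline void serialize_thread_state(uintptr_t thread_ptr) {
      // Derive a per-thread offset from the thread pointer; the modulo keeps
      // it inside the page, and distinct threads usually land on distinct
      // cache lines, minimizing bus traffic.
      size_t index = (thread_ptr >> 6) % (sizeof(serialize_page) / sizeof(int));
      serialize_page[index] = 1;  // the store itself is what matters
    }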
248 // Calls to C land
263 // call this when G2_thread is not known to be valid
265 save_frame(0); // to avoid clobbering O0
274 inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
295 save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod)
321 // smash G2_thread, as if the VM were about to anyway
348 // (Note: flags should always be zero before we get here, so it doesn't need to be set.)
351 // Verify that flags was zeroed on return to Java
353 save_frame(0); // to avoid clobbering O0
359 // Verify that flags was zeroed on return to Java
370 // will always be set to NULL. It is set here so that if we are doing a call to
371 // native (not VM) that we capture the known pc and don't have to rely on the
401 save_frame_and_mov(0, Lmethod, Lmethod); // Propagate Lmethod to helper frame
409 // Always return last_Java_pc to zero
411 // Always null flags after return to Java
435 if (VerifyThread) mov(G2_thread, O0); // about to be smashed; pass early
815 Assembler::sethi(lo, d); // hardware version zero-extends to upper 32
820 Assembler::sethi(bits_33to2, d); // hardware version zero-extends to upper 32
826 Assembler::sethi(~lo, d); // hardware version zero-extends to upper 32
833 Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
898 if (nWords & 1) ++nWords; // round up to double-word
933 // The trick here is to use precisely the same memory word
934 // that trap handlers also use to save the register.
936 // it works fine to save the register's value, whether or not
1124 // %%%%%% need to implement this
1129 // %%%%%% need to implement this
1133 // %%%%%% need to implement this
1159 // Call indirectly to solve generation ordering problem
1163 // Enough to hold 8 64-bit registers.
1170 mov(reg,O0); // Move arg into O0; arg might be in O7 which is about to be crushed
1175 // Load the address to call into O7
1177 // Register call to verify_oop_subroutine
1197 // Call indirectly to solve generation ordering problem
1201 // Enough to hold 8 64-bit registers.
1208 ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
1213 // Load the address to call into O7
1215 // Register call to verify_oop_subroutine
1235 // O0 is now the oop to be checked. O7 is the return address.
1327 // factor long stop-sequence into subroutine to save space
1330 // call indirectly to solve generation ordering problem
1339 // save frame first to get O7 for return address
1340 // add one word to size in case struct is odd number of words long
1349 // factor long stop-sequence into subroutine to save space
1352 // call indirectly to solve generation ordering problem
1361 // restore(); done in callee to save space!
1381 // We must be able to turn interactive prompting off
1382 // in order to run automated test scripts on the VM
1412 save_frame(); // one more save to free up another O7 register
1415 // We expect pointer to message in I1. Caller must set it up in O1
1441 // In order to get locks to work, we need to fake an in_VM state
1665 // This code can be optimized to use the 64 bit shifts in V9.
1677 // more to take care of the special (rare) case where count is zero
1726 // This code can be optimized to use the 64 bit shifts in V9.
1738 // more to take care of the special (rare) case where count is zero
1789 // This code can be optimized to use the 64 bit shifts in V9.
1801 // more to take care of the special (rare) case where count is zero
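The comment pair repeated at 1665/1677, 1726/1738 and 1789/1801 belongs to the long-shift helpers, which operate on a 64-bit value split across two 32-bit registers. A hedged C++ sketch of why count == 0 is the special (rare) case, the helper name being illustrative:

    #include <cstdint>

    // Left-shift a 64-bit value held as two 32-bit halves; on V9 this whole
    // sequence collapses to a single 64-bit sllx.
    void lshl32(uint32_t& hi, uint32_t& lo, unsigned count /* 0..63 */) {
      if (count == 0) return;            // special case: lo >> (32 - 0) is undefined
      if (count >= 32) {
        hi = lo << (count - 32);
        lo = 0;
      } else {
        hi = (hi << count) | (lo >> (32 - count));
        lo = lo << count;
      }
    }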
1907 // load indirectly to solve generation ordering problem
2028 // On failure, execution transfers to the given label.
2045 assert(method_result->is_global(), "must be able to return value");
2097 // (invert the test to fall through to found_method...)
2110 // scan_temp[-scan_step] points to the vtable offset we need
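The four comments above (2028-2110) outline the interface-method lookup scan. A condensed C++ sketch of that loop, with an illustrative entry layout rather than the VM's real itable structures:

    // Walk interface entries until the sought interface is found; a null
    // entry terminates the table and means lookup failure (the assembly
    // transfers to the given label instead of returning).
    struct ItableEntry { void* interface_klass; int vtable_offset; };

    int* lookup_itable(ItableEntry* scan, void* intf_klass, bool& failed) {
      for (;; ++scan) {
        if (scan->interface_klass == intf_klass) break;   // found_method
        if (scan->interface_klass == nullptr) { failed = true; return nullptr; }
      }
      failed = false;
      return &scan->vtable_offset;   // "the vtable offset we need"
    }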
2229 // We move this check to the front of the fast path because many
2356 // Load next super to check
2362 // A miss means we are NOT a subtype and need to keep looping
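The two comments above (2356, 2362) describe the slow-path subtype check. A minimal C++ sketch, assuming an illustrative flat array of secondary supertypes:

    // Scan the candidate's secondary supers; a miss keeps looping, and
    // running off the end means we are NOT a subtype.
    bool is_subtype_slow(void** secondary_supers, int count, void* super_klass) {
      for (int i = 0; i < count; ++i) {
        if (secondary_supers[i] == super_klass) return true;   // hit
      }
      return false;
    }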
2422 // pointers to allow age to be placed into low bits
2446 // that we are not the bias owner in the current epoch. We need to
2447 // figure out more details about the state of the header in order to
2452 // the prototype header is no longer biased and we have to revoke
2459 // bits of the mark word are equal to the epoch bits of the
2461 // only change at a safepoint.) If not, attempt to rebias the object
2463 // that the current epoch is invalid in order to do this because
2470 // about the owner; it might be set or it might be clear. Try to
2472 // fails we will go in to the runtime to revoke the object's bias.
2482 // need to revoke that bias. The revocation will occur in the
2497 // circumstances _only_, we are allowed to use the current header's
2498 // value as the comparison value when doing the cas to acquire the
2500 // the bias from one thread to another directly in this situation.
2502 // FIXME: due to a lack of registers we currently blow away the age
2503 // bits in this situation. Should attempt to preserve them.
2510 // need to revoke that bias. The revocation will occur in the
2525 // to be biased any more. We are going to try to reset the mark of
2526 // this object to the prototype value and fall through to the
2529 // bias of this particular object, so it's okay to continue in the
2532 // FIXME: due to a lack of registers we currently blow away the age
2533 // bits in this situation. Should attempt to preserve them.
2537 // Fall through to the normal CAS-based lock, because no matter what
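The long run of comments from 2446 to 2537 walks through the biased-locking cases: bias held in the current epoch, expired epoch (rebias), and revoked prototype (revoke). A very condensed, hypothetical C++ sketch of that decision tree; the bit masks and CAS helper are placeholders, not the real mark-word encoding:

    #include <cstdint>

    constexpr uintptr_t BIAS_PATTERN = 0x5;             // placeholder bias bits
    constexpr uintptr_t EPOCH_MASK   = 0x300;           // placeholder epoch bits
    constexpr uintptr_t OWNER_MASK   = ~uintptr_t(0x3FF);

    bool try_biased_lock(volatile uintptr_t* mark_addr, uintptr_t self,
                         uintptr_t prototype /* klass prototype header */) {
      uintptr_t mark = *mark_addr;
      if ((mark & 0x7) != BIAS_PATTERN) return false;   // not biased at all
      if ((mark & OWNER_MASK) == self &&
          (mark & EPOCH_MASK) == (prototype & EPOCH_MASK))
        return true;                                    // already biased to us
      if ((prototype & 0x7) != BIAS_PATTERN)
        return false;                                   // class bias revoked: revoke ours too
      uintptr_t desired;
      if ((mark & EPOCH_MASK) != (prototype & EPOCH_MASK)) {
        desired = (prototype & ~OWNER_MASK) | self;     // epoch expired: try to rebias
      } else {
        desired = (mark & ~OWNER_MASK) | self;          // try to acquire the bias
      }
      // If the CAS fails, the real code goes into the runtime to revoke the
      // object's bias; here the caller simply falls back to the CAS lock.
      return __sync_bool_compare_and_swap(mark_addr, mark, desired);
    }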
2551 // Note: we do not have to check the thread ID for two reasons.
2583 // extremely sensitive to the size of the code emitted by compiler_lock_object
2585 // length. (Simple experiments to pad CLO with unexecuted NOPs demonstrate the
2618 // Save Rbox in Rscratch to be used for the cas operation
2621 // set Rmark to markOop | markOopDesc::unlocked_value
2662 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
2698 // Try to CAS m->owner from null to Self
2749 // on refworkload 0.83. If we need to reduce the size of the code
2753 // A more extreme idea is to always inflate on stack-lock recursion.
2756 // A brief experiment - requiring changes to synchronizer.cpp, interpreter,
2759 // control to the "slow" operators in synchronizer.cpp.
2778 // Try to CAS m->owner from null to Self
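Both "Try to CAS m->owner from null to Self" comments (2698, 2778) name the monitor-enter fast path. A minimal C++ sketch with std::atomic standing in for the VM's ObjectMonitor field:

    #include <atomic>

    struct Monitor { std::atomic<void*> owner{nullptr}; };

    bool try_enter(Monitor* m, void* self) {
      void* expected = nullptr;
      // Success means we now own the monitor; on failure the owner field is
      // left untouched and the caller falls through to the slow path.
      return m->owner.compare_exchange_strong(expected, self,
                                              std::memory_order_acquire);
    }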
2828 // This could be related to inlining policies, code cache management, or
2857 // Emit code to check that _owner == Self
2902 // Do not bother to ratify that m->Owner == Self.
2912 // we should transfer control directly to the slow-path.
2914 // The logic is equivalent to :
2969 // would also need to check for orphan monitors and stranded threads.
2971 // Finally, inflation is also used when T2 needs to assign a hashCode
2972 // to O and O is stack-locked by T1. The "stomp" race could cause
2973 // an assigned hashCode value to be lost. We can avoid that condition
2995 // %%%%% need to implement this
2999 // %%%%% need to implement this
3003 // %%%%% need to implement this
3008 // %%%%% need to implement this
3013 // %%%%% need to implement this
3018 // %%%%% need to implement this
3023 // %%%%% need to implement this
3028 // %%%%% need to implement this
3071 Register obj, // result: pointer to object after successful allocation
3099 // try to allocate
3155 Register obj, // result: pointer to object after successful allocation
3233 // the amount free in the tlab is too large to discard.
3237 // increment waste limit to prevent getting stuck on this slow path
3277 // set klass to intArrayKlass
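Lines 3071-3277 cover TLAB allocation and the refill/discard decision. A hedged C++ sketch of the fast path and the waste-limit logic described at 3233-3237, with illustrative field names:

    #include <cstddef>

    struct Tlab {
      char*  top;
      char*  end;
      size_t refill_waste_limit;   // max free bytes we are willing to discard
      size_t waste_increment;
    };

    void* tlab_allocate(Tlab& t, size_t size) {
      if (t.top + size <= t.end) {          // try to allocate: bump the pointer
        void* obj = t.top;
        t.top += size;
        return obj;
      }
      if (size_t(t.end - t.top) > t.refill_waste_limit) {
        // Too much room left to throw away: keep this TLAB and raise the
        // limit so repeated visits do not get stuck on this slow path.
        t.refill_waste_limit += t.waste_increment;
        return nullptr;                     // caller allocates in shared eden
      }
      // Otherwise retire the TLAB, plugging leftover space with a filler
      // int[] (the "set klass to intArrayKlass" step), and refill. Elided.
      return nullptr;
    }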
3414 // Writes to stack successive pages until offset reached to check for
3442 // touch a few more pages below. N.B. It is important to touch all
3443 // the way down to and including i=StackShadowPages.
3451 // testing if reserved zone needs to be enabled
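Lines 3414-3451 describe stack banging. A minimal C++ sketch of the page-touching loop, with page size and shadow-page count as parameters (the real values are platform constants):

    #include <cstddef>

    // Touch each page from the stack pointer down through the shadow region,
    // all the way down to and including i == shadow_pages; the write faults
    // early if a guard page is hit.
    void bang_stack_shadow_pages(volatile char* sp, int shadow_pages,
                                 size_t page_size) {
      for (int i = 1; i <= shadow_pages; ++i) {
        sp[-ptrdiff_t(size_t(i) * page_size)] = 0;
      }
    }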
3539 // This should be rare enough that we can afford to save all the
3620 // Do we need to load the previous value?
3642 // OK, it's not filtered, so we'll need to call enqueue. In the normal
3648 "Or we need to think harder.");
3670 // This gets to assume that o0 contains the object address.
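Lines 3620-3670 belong to the G1 pre-barrier (SATB). A hedged C++ sketch of the filter-then-enqueue shape those comments describe; the queue layout and slow-path hook are illustrative, not the VM's real structures:

    #include <cstddef>

    struct SatbQueue { void** buf; size_t index; };   // index counts down to 0

    void g1_pre_barrier(void** field, void* pre_val, bool have_pre_val,
                        SatbQueue& q, void (*enqueue_slow)(void*)) {
      if (!have_pre_val) pre_val = *field;   // do we need to load the previous value?
      if (pre_val == nullptr) return;        // filtered: nothing to record
      if (q.index != 0) {                    // room in the buffer: store and bump
        q.index -= 1;
        q.buf[q.index] = pre_val;
      } else {
        enqueue_slow(pre_val);               // not filtered, so call enqueue
      }
    }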
3735 // This should be rare enough that we can afford to save all the
3788 // If the "store_addr" register is an "in" or "local" register, move it to
3998 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4010 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4058 // the instructions they generate change, then this method needs to be updated.
4074 // instr_size_for_decode_klass_not_null() needs to get updated.
4076 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4096 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4131 // Compress char[] to byte[] by compressing 16 bytes at once. Return 0 on failure.
4156 // Set mask to 0xff00 ff00 ff00 ff00 to check for non-latin1 characters
4170 // Although we have to move the data between integer and floating point registers, this is
4174 // annul zeroing if branch is not taken to preserve original count
4182 // Compress by copying one byte per char from ftmp1 and ftmp2 to ftmp3
4192 // annul LDX if branch is not taken to prevent access past end of string
4196 // Fallback to slow version
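Lines 4131-4196 implement char[] -> byte[] compression, 16 bytes at a time, using the 0xff00ff00ff00ff00 mask to spot non-latin1 characters. A scalar C++ sketch of the same contract (one char per iteration, return 0 on failure):

    #include <cstdint>
    #include <cstddef>

    size_t compress_chars(const uint16_t* src, uint8_t* dst, size_t len) {
      for (size_t i = 0; i < len; ++i) {
        if (src[i] & 0xFF00) return 0;   // non-latin1 character: fail
        dst[i] = uint8_t(src[i]);
      }
      return len;
    }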
4200 // Compress char[] to byte[]. Return 0 on failure.
4210 // annul zeroing if branch is not taken to preserve original count
4216 // annul LDUH if branch is not taken to prevent access past end of string
4221 // Inflate byte[] to char[] by inflating 16 bytes at once.
4238 // Initialize float register to zero
4259 // annul LDX if branch is not taken to prevent access past end of string
4263 // Fallback to slow version
4267 // Inflate byte[] to char[].
4278 // annul LDUB if branch is not taken to prevent access past end of string
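The companion inflation routine at 4221-4278 goes the other way; since every latin1 byte zero-extends to a char, it cannot fail. A matching scalar sketch:

    #include <cstdint>
    #include <cstddef>

    void inflate_bytes(const uint8_t* src, uint16_t* dst, size_t len) {
      for (size_t i = 0; i < len; ++i) {
        dst[i] = src[i];   // zero-extend byte to char
      }
    }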
4315 // We need an additional register to keep track of two limits
4321 cmp(limit1, (int)0); // use cast to resolve overloading ambiguity
4325 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars
4369 delayed()->sra(diff, 1, result); // Divide by 2 to get number of chars
4374 // Shift str1 and str2 to the end of the arrays, negate limit
4396 // annul LDUB if branch is not taken to prevent access past end of string
4400 // If strings are equal up to min length, return the length difference.
4402 // Divide by 2 to get number of chars
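Lines 4315-4402 use the end-relative indexing idiom: point both strings at their ends and run a negated limit up to zero, so a single add drives both the index and the loop test. A hedged C++ sketch over byte strings:

    #include <cstdint>
    #include <cstddef>

    int compare_from_end(const uint8_t* s1, const uint8_t* s2,
                         ptrdiff_t min_len, ptrdiff_t len_diff) {
      const uint8_t* e1 = s1 + min_len;   // shift str1 to the end of the array
      const uint8_t* e2 = s2 + min_len;   // likewise str2; negate the limit
      for (ptrdiff_t i = -min_len; i != 0; ++i) {
        int d = e1[i] - e2[i];
        if (d != 0) return d;             // first difference decides
      }
      return int(len_diff);               // equal up to min length
    }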
4475 // Shift ary1 and ary2 to the end of the arrays, negate limit
4507 // Shift 'limit' bytes to the right and compare
4508 sll(limit, 3, limit); // bytes to bits
4528 Register lmask = t2; // t2 is aliased to lmask
4535 Assembler::sethi(0x80808000, t2); //! sethi macro fails to emit optimal
4564 sll(t4, 3, t4); // # bits to shift right, left -> t5,t4
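The lmask setup at 4528-4564 is the classic SWAR sign-bit test. A one-line C++ sketch of what the 0x8080... mask detects across eight bytes at once:

    #include <cstdint>

    // True iff any of the eight packed bytes has its high bit set,
    // i.e. any byte is "negative".
    bool word_has_negatives(uint64_t eight_bytes) {
      return (eight_bytes & 0x8080808080808080ULL) != 0;
    }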
4583 // i contains next index to be processed (adr. inp+i is on 8B boundary)
4601 // i contains next index to be processed (adr. inp+i is on 8B boundary)
4622 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
4644 delayed()->add(to, count, end);
4648 // Clean the beginning of space up to next cache line.
4650 stx(G0, to, offs);
4653 // align to next cache line
4654 add(to, cache_line_size, to);
4655 and3(to, -cache_line_size, to);
4659 // BIS should not be used to zero tail (64 bytes)
4660 // to avoid zeroing a header of the following object.
4665 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
4666 add(to, cache_line_size, to);
4667 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);
4673 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);
4677 stx(G0, to, 0);
4678 add(to, 8, to);
4679 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
4684 * Update CRC-32[C] with a byte value according to constants in table
4687 * @param [in]val Register containing the byte to fold into the CRC.
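The doc comment above (4684-4687) describes the per-byte CRC fold. A minimal C++ sketch of the standard reflected table step it refers to; the table name is illustrative and its 256 entries are elided:

    #include <cstdint>

    extern const uint32_t crc_table[256];   // precomputed, contents elided

    inline uint32_t fold_byte_crc32(uint32_t crc, uint8_t val) {
      return (crc >> 8) ^ crc_table[(crc ^ val) & 0xFF];
    }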
4805 // Check if below cutoff, proceed directly to cleanup code
4809 // Align buffer to 8-byte boundary
4818 // Alignment loop, table look up method for up to 7 bytes
4837 // At least 32 bytes left to process
4839 // Free up registers by storing them to FP registers
4844 // Determine which loop to enter
4889 // Fold 512 bits to 128 bits
4934 // 8 8-bit folds to compute 32-bit CRC.
4938 srl(tmp[1], G0, crc); // move 32 bits to general register
5014 // reverse the byte order of lower 32 bits to big endian, and move to FP side
5029 // schedule ldf's ahead of crc32c's to hide the load-use latency
5058 // move to INT side, and reverse the byte order of lower 32 bits to little endian
5076 // reverse the byte order to big endian, via stack, and move to FP side
5125 // move to INT side, and reverse the byte order of lower 32 bits to little endian