Lines Matching defs:to

17  * 2 along with this work; if not, write to the Free Software Foundation,
63 // Call stubs are used to call Java from C.
90 // offsets to fp
111 // STACK on entry to call_stub:
124 // Save non-volatile registers to ABI of caller frame.
154 // Add space required by arguments to frame size.
158 // Convert frame size from words to bytes.
197 // Any arguments to copy?
209 // Let r_argumentcopy_addr point to last outgoing Java argument.
212 // Let r_argument_addr point to last incoming Java argument.
237 // Register state on entry to frame manager / native entry:
248 // Z_esp points to the slot below the last argument.
252 // Stack on entry to frame manager / native entry:
272 // Now pop frame, process result, and return to caller.
293 // Pop frame. Done here to minimize stalls.
297 // to frame manager / native entry.
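The call-stub lines above describe how C code enters Java: save the caller's non-volatile registers, size and build a new frame, copy the Java arguments, branch to the frame manager / native entry, then pop the frame and process the result. Here is a hedged C++ sketch of the function-pointer shape such a stub is invoked through; the parameter list approximates HotSpot's CallStub, and the placeholder types are assumptions, not the verified declaration:

    #include <cstdint>

    typedef unsigned char* address;   // placeholder for HotSpot's 'address'
    class Method;                     // opaque stand-ins for HotSpot types
    class Thread;

    // Hedged sketch: the function-pointer type C code calls the stub through.
    typedef void (*CallStub)(address   link,               // return link
                             intptr_t* result,             // where the Java result is stored
                             int       result_type,        // BasicType of the result
                             Method*   method,             // callee method
                             address   entry_point,        // frame manager / native entry
                             intptr_t* parameters,         // outgoing Java arguments
                             int       size_of_parameters,
                             Thread*   thread);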
354 __ z_br(Z_R14); // Return to caller.
359 __ z_br(Z_R14); // Return to caller.
364 __ z_br(Z_R14); // Return to caller.
369 __ z_br(Z_R14); // Return to caller.
374 __ z_br(Z_R14); // Return to caller.
379 __ z_br(Z_R14); // Return to caller.
384 __ z_br(Z_R14); // Return to caller.
389 __ z_br(Z_R14); // Return to caller.
394 __ z_br(Z_R14); // Return to caller.
399 __ z_br(Z_R14); // Return to caller.
404 __ z_br(Z_R14); // Return to caller.
409 __ z_br(Z_R14); // Return to caller.
414 __ z_br(Z_R14); // Return to caller.
450 // Complete return to VM.
465 // Z_R14: pc the runtime library callee wants to return to.
531 // Jump to exception handler
544 // needs all registers to be preserved between the fault point and
554 // it needs to be properly traversed and ignored during GC, so we
653 assert(Universe::heap() != NULL, "java heap must be initialized to generate partial_subtype_check stub");
672 // Return address of code to be called from code generated by
755 // Tail call: call C code and return to stub caller.
760 __ z_br(Z_R1); // Branch without linking, callee will return to stub caller.
774 // Nothing to do if count <= 0.
870 // This is to test that the count register contains a positive int value.
871 // Required because C2 does not respect int to long conversion for stub calls.
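The two lines above explain the extra guard: C2 can pass a 32-bit int count with stale bits in the upper half of the 64-bit register. A hedged C++ sketch of the equivalent check, with illustrative names:

    #include <cstdint>

    // Hedged sketch: trust only the low 32 bits of the incoming count register.
    static bool have_work(uint64_t count_reg) {
      int32_t count = (int32_t)count_reg;  // upper 32 bits may be garbage
      return count > 0;                    // count <= 0: nothing to copy
    }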
880 // If no actual overlap is detected, control is transferred to the
887 // Z_ARG2 - to
907 // "from" and "to" addresses are assumed to be heapword aligned.
911 // to: Z_ARG2
1002 // Keep len test local to branch. It is generated only once.
1022 // Prefetch another cache line. We certainly have more than one line to copy.
1029 // Remember entry value of ARG2 to restore all arguments later from that knowledge.
1039 // Fall through to MVCLE case.
1065 __ z_srag(Z_ARG3, laddr_reg, log2_size); // Convert back to #elements.
1110 // to save expensive handling of trailing bytes.
1146 // just to double-check and to be on the safe side.
1162 // There is no suitable place after this point to put the template.
1170 // #bytes to copy must be at least 256!!!
1181 __ z_lcgr(ix_reg, Z_R1); // Ix runs from -(n-2)*stride to 1*stride (inclusive).
1203 __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3.
1228 // Fallthru to doMVCgeneral
1232 // Somewhat expensive due to use of EX instruction, but simple.
1251 __ z_srag(Z_ARG3, dst_reg, log2_size); // Convert back to #elements to restore ARG3.
1273 // Somewhat expensive due to use of EX instruction, but simple. Penalty: 9 ticks.
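Several lines above convert between element counts and byte counts (SLAG/SRAG in the stub). The arithmetic, as a hedged C++ sketch where log2_size is log2 of the element size:

    #include <cstdint>

    // Hedged sketch: #elements <-> #bytes are plain shifts by log2(element size).
    static int64_t to_bytes(int64_t elements, int log2_size)  { return elements << log2_size; }
    static int64_t to_elements(int64_t bytes, int log2_size)  { return bytes >> log2_size; }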
1314 // "from" and "to" addresses are assumed to be heapword aligned.
1318 // to: Z_ARG2
1369 // Preceding the main loop, some bytes are copied to obtain a DW-multiple remaining length.
1379 if (element_size == 8) // Nothing to do here.
1391 __ add2reg(ix_reg, -1); // Decrement delayed to avoid AGI.
1400 __ add2reg(ix_reg, -2); // Decrement delayed to avoid AGI.
1406 if (log2_size <= 2) { // There are just 4 bytes (left) that need to be copied.
1409 __ add2reg(ix_reg, -4); // Decrement delayed to avoid AGI.
1413 // Control can never get to here. Never! Never ever!
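The prologue described above copies a few leading elements so that the remaining length becomes a doubleword multiple. A hedged C++ rendering of the idea; the stub does this with unrolled, element-size-specific moves, and all names here are illustrative:

    #include <cstddef>
    #include <cstring>

    // Hedged sketch: peel off leading elements until the remaining byte count
    // is a multiple of 8 (one doubleword). For element sizes 1, 2, and 4 this
    // takes at most a few iterations; for size 8 there is nothing to do.
    static void align_prologue(const char*& from, char*& to,
                               size_t& count, size_t element_size) {
      while (count > 0 && (count * element_size) % 8 != 0) {
        memcpy(to, from, element_size);  // copy one element
        from += element_size;
        to   += element_size;
        count--;
      }
    }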
1435 // "from" and "to" addresses are assumed to be heapword aligned.
1440 // Refer to generate_disjoint_copy for a list of prereqs and features:
1450 // Refer to generate_disjoint_copy for a list of prereqs and features:
1460 // Refer to generate_disjoint_copy for a list of prereqs and features:
1470 // Refer to generate_disjoint_copy for a list of prereqs and features:
1480 // Refer to generate_disjoint_copy for a list of prereqs and features.
1497 // Refer to generate_conjoint_copy for a list of prereqs and features:
1502 array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint.
1512 // Refer to generate_conjoint_copy for a list of prereqs and features:
1517 array_overlap_test(nooverlap_target, 1); // Branch away to nooverlap_target if disjoint.
1526 // Refer to generate_conjoint_copy for a list of prereqs and features:
1532 array_overlap_test(nooverlap_target, 2); // Branch away to nooverlap_target if disjoint.
1541 // Refer to generate_conjoint_copy for a list of prereqs and features:
1547 array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint.
1556 // Refer to generate_conjoint_copy for a list of prereqs and features.
1564 // Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
1565 array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.
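array_overlap_test, used in all the conjoint entries above, branches to the disjoint (forward) copy whenever that is safe. A hedged C++ sketch of the predicate involved; shift is log2 of the element size, and the names are illustrative:

    #include <cstddef>
    #include <cstdint>

    // Hedged sketch: a forward copy is unsafe when 'to' lies inside
    // [from, from + (count << shift)). The single unsigned comparison also
    // covers the to < from case, where the difference wraps to a huge value.
    static bool forward_copy_safe(const void* from, void* to,
                                  size_t count, int shift) {
      uintptr_t f = (uintptr_t)from;
      uintptr_t t = (uintptr_t)to;
      return (t - f) >= (count << shift);
    }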
1650 // Z_ARG1 - source data block. Ptr to leftmost byte to be processed.
1651 // Z_ARG2 - destination data block. Ptr to leftmost byte to be stored.
1653 // to the same piece of storage.
1660 // The crypto key, as passed from the caller to these encryption stubs,
1678 // Z_R0 holds the crypto function code. Please refer to the KM/KMC instruction
1685 // Helper function which generates code to
1747 const int AES_parmBlk_addspace = 24; // Must be sufficiently large to hold all spilled registers
1752 // This len must be known at JIT compile time. Only then are we able to recalc the SP before resize.
1753 // We buy this knowledge by wasting some (up to AES_parmBlk_align) bytes of stack space.
1756 // Use parmBlk as temp reg here to hold the frame pointer.
1776 // Before returning, the stub has to copy the chaining value from
1778 // to the chaining value array the address of which was passed in the cv argument.
1779 // As all the available registers are used and modified by KMC, we need to save
1780 // the key length across the KMC instruction. We do so by spilling it to the stack,
1795 // cryptographic strength of the keys used to 128 bit. If we have AES hardware support
1835 // is copied back to the cv array as it is needed for subsequent cipher steps.
1836 // The keylen value and the original SP (before resizing) were pushed to the stack
1891 Register to = Z_ARG2; // destination byte array
1898 const Register parmBlk = Z_R1; // parameter block address (points to crypto key)
1906 // Copy arguments to registers as required by crypto instruction.
1909 __ z_lgr(dst, to); // Copy dst address, even register required.
1943 // chaining value and key to be, in this sequence, adjacent in storage. Thus, we need to allocate some
1945 // Stack space, in contrast, is deallocated automatically when we return from the stub to the caller.
1951 // We align the parameter block to the next available octoword.
1957 Register to = Z_ARG2; // destination byte array (ciphered)
1960 const Register msglen = Z_ARG5; // Total length of the msg to be encrypted. Value must be returned
1965 const Register parmBlk = Z_R1; // parameter block address (points to crypto key)
1973 // Construct parm block address in parmBlk (== Z_R1), copy cv and key to parm block.
1979 __ z_lgr(dst, to);
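The parameter-block setup above requires the chaining value and the key to be adjacent in storage, inside a stack area aligned to an octoword. A hedged C++ sketch of the alignment step, assuming the 32-byte octoword the comments imply; the constant and names are illustrative:

    #include <cstdint>

    enum { AES_parmBlk_align = 32 };  // octoword alignment, per the stub's scheme

    // Hedged sketch: round a fresh stack address up to the next octoword; the
    // cv is copied to that address, the key immediately after it. Up to
    // AES_parmBlk_align - 1 bytes of stack are wasted, as the comments note.
    static unsigned char* align_parm_block(unsigned char* sp_after_resize) {
      uintptr_t p = (uintptr_t)sp_after_resize;
      p = (p + AES_parmBlk_align - 1) & ~(uintptr_t)(AES_parmBlk_align - 1);
      return (unsigned char*)p;
    }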
2015 // Z_ARG1 - source data block. Ptr to leftmost byte to be processed.
2016 // Z_ARG2 - current SHA state. Ptr to state area. This area serves as
2020 // (Z_ARG4 - Z_ARG3) gives the #bytes remaining to be processed.
2025 // - All stubs, whether they are single-block or multi-block, are assumed to
2028 // Special end processing, as done by the KLMD instruction, seems to be
2038 // - The single-block stub is expected to digest exactly one data block, starting
2041 // - The multi-block stub is expected to digest all data blocks which start in
2043 // (srcLimit-srcOff), rounded up to the next multiple of the data block length,
2044 // gives the number of blocks to digest. It must be assumed that the calling code
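The multi-block contract above pins down the return value. Restated as a hedged C++ sketch (dataBlk is 64 bytes for SHA-1/SHA-256 and 128 for SHA-512; the names are illustrative):

    #include <cstdint>

    // Hedged sketch: #blocks digested and the offset returned by a multi-block
    // stub, per the contract stated above.
    static int64_t multiblock_return_offset(int64_t srcOff, int64_t srcLimit,
                                            int64_t dataBlk) {
      int64_t len     = srcLimit - srcOff;              // #bytes remaining
      int64_t nBlocks = (len + dataBlk - 1) / dataBlk;  // round up to full blocks
      return srcOff + nBlocks * dataBlk;                // first unprocessed byte
    }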
2053 const Register srcBuff = Z_ARG1; // Points to first block to process (offset already added).
2066 if (multiBlock) { // Process everything from offset to limit.
2071 // to inform the reader what must be happening hidden in the calling code.
2073 // The data block to be processed can have arbitrary length, i.e. its length does not
2074 // need to be an integer multiple of SHA<n>_datablk. Therefore, we need to implement
2076 // to copy the SHA state back and forth. If the length is odd, we copy the SHA state
2077 // to the stack, execute a KLMD instruction on it and copy the result back to the
2080 // Total #srcBuff blocks to process.
2086 __ z_llgfr(srcBufLen, srcBufLen); // Cast to 64-bit.
2089 __ z_sgfr(srcBufLen, srcOff); // SrcOff passed as int, now properly cast to long.
2096 // Integral #blocks to digest?
2099 // We insert an asm_assert into the KLMD case to guard against that.
2108 __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA1_dataBlk); // #srcBuff bytes to process
2110 __ add2reg(Z_RET, (int)VM_Version::MsgDigest::_SHA1_dataBlk, srcOff); // Offset of first unprocessed byte in buffer. No 32 to 64 bit extension needed.
2120 // Security net: this stub is believed to be called for full-sized data blocks only.
2121 // NOTE: The following code is believed to be correct, but it is not tested.
2147 if (multiBlock) { // Process everything from offset to limit.
2151 // to inform the reader what must be happening hidden in the calling code.
2153 // The data block to be processed can have arbitrary length, i.e. its length does not
2154 // need to be an integer multiple of SHA<n>_datablk. Therefore, we need to implement
2156 // to copy the SHA state back and forth. If the length is odd, we copy the SHA state
2157 // to the stack, execute a KLMD instruction on it and copy the result back to the
2160 // Total #srcBuff blocks to process.
2166 __ z_llgfr(srcBufLen, srcBufLen); // Cast to 64-bit.
2176 // Integral #blocks to digest?
2179 // We insert an asm_assert into the KLMD case to guard against that.
2188 __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA256_dataBlk); // #srcBuff bytes to process
2199 // Security net: this stub is believed to be called for full-sized data blocks only.
2201 // The following code is believed to be correct, but it is not tested.
2227 if (multiBlock) { // Process everything from offset to limit.
2231 // to inform the reader what must be happening hidden in the calling code.
2233 // The data block to be processed can have arbitrary length, i.e. its length does not
2234 // need to be an integer multiple of SHA<n>_datablk. Therefore, we need to implement
2236 // to copy the SHA state back and forth. If the length is odd, we copy the SHA state
2237 // to the stack, execute a KLMD instruction on it and copy the result back to the
2240 // Total #srcBuff blocks to process.
2246 __ z_llgfr(srcBufLen, srcBufLen); // Cast to 64-bit.
2256 // Integral #blocks to digest?
2259 // We insert an asm_assert into the KLMD case to guard against that.
2268 __ load_const_optimized(srcBufLen, (int)VM_Version::MsgDigest::_SHA512_dataBlk); // #srcBuff bytes to process
2279 // Security net: this stub is believed to be called for full-sized data blocks only.
2281 // The following code is believed to be correct, but it is not tested.
2304 // arguments to kernel_crc32:
2307 Register dataLen = Z_ARG3; // #bytes to process, int
2320 __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill 4 registers.
2321 __ z_stmg(Z_R10, Z_R13, 1*8, Z_SP); // Spill regs 10..13 to make them available as work registers.
2324 __ resize_frame(+(6*8), Z_R0, true); // Resize frame back, releasing the register spill space.
2404 __ z_br(Z_R14); // Return to caller.
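For contrast with the spill-heavy stub above: a hedged, deliberately naive C++ sketch of a byte-at-a-time reflected CRC-32 update. The real kernel_crc32 processes a word or more per step, using the spilled registers as table pointers; the table parameter here is an assumption for self-containment:

    #include <cstddef>
    #include <cstdint>

    // Hedged sketch: classic table-driven CRC-32, one byte per iteration.
    static uint32_t crc32_update(uint32_t crc, const uint8_t* buf, size_t len,
                                 const uint32_t table[256]) {
      crc = ~crc;                                          // pre-invert
      for (size_t i = 0; i < len; i++) {
        crc = table[(crc ^ buf[i]) & 0xff] ^ (crc >> 8);
      }
      return ~crc;                                         // post-invert
    }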
2414 // platforms; however, the benefit seems to be smaller than the
2453 // These entry points require SharedInfo::stack0 to be set up in non-core builds.
2522 // Put extra information into the stub code to make it more readable.
2535 // code to cache lines. Use CodeEntryAlignment instead.