/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/align.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#if defined(ABI_ELFv2)
#define STUB_ENTRY(name) StubRoutines::name()
#else
#define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
#endif
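
// Note: on the 64-bit ELFv1 ABI a stub routine's address is a function
// descriptor (entry point, TOC pointer, environment pointer), so STUB_ENTRY
// dereferences it to get the real code entry point; under ELFv2 the address
// returned by StubRoutines::name() is already the entry point.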

class StubGenerator: public StubCodeGenerator {
 private:

  // Call stubs are used to call Java from C
  //
  // Arguments:
  //
  //   R3  - call wrapper address     : address
  //   R4  - result                   : intptr_t*
  //   R5  - result type              : BasicType
  //   R6  - method                   : Method
  //   R7  - frame mgr entry point    : address
  //   R8  - parameter block          : intptr_t*
  //   R9  - parameter count in words : int
  //   R10 - thread                   : Thread*
  //
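  // These registers correspond to the parameters of the CallStub function
  // pointer type used by JavaCalls (see javaCalls.hpp); roughly:
  //
  //   typedef void (*CallStub)(address   link,           // call wrapper
  //                            intptr_t* result,
  //                            BasicType result_type,
  //                            Method*   method,
  //                            address   entry_point,    // frame mgr entry
  //                            intptr_t* parameters,
  //                            int       size_of_parameters,
  //                            TRAPS);
  //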
  address generate_call_stub(address& return_address) {
    // Setup a new c frame, copy java arguments, call frame manager or
    // native_entry, and process result.

    StubCodeMark mark(this, "StubRoutines", "call_stub");

    address start = __ function_entry();

    // some sanity checks
    assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
    assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
    assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
    assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
    assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");

    Register r_arg_call_wrapper_addr        = R3;
    Register r_arg_result_addr              = R4;
    Register r_arg_result_type              = R5;
    Register r_arg_method                   = R6;
    Register r_arg_entry                    = R7;
    Register r_arg_thread                   = R10;

    Register r_temp                         = R24;
    Register r_top_of_arguments_addr        = R25;
    Register r_entryframe_fp                = R26;

    {
      // Stack on entry to call_stub:
      //
      //      F1      [C_FRAME]
      //              ...

      Register r_arg_argument_addr          = R8;
      Register r_arg_argument_count         = R9;
      Register r_frame_alignment_in_bytes   = R27;
      Register r_argument_addr              = R28;
      Register r_argumentcopy_addr          = R29;
      Register r_argument_size_in_bytes     = R30;
      Register r_frame_size                 = R23;

      Label arguments_copied;

      // Save LR/CR to caller's C_FRAME.
      __ save_LR_CR(R0);

      // Zero extend arg_argument_count.
      __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);

      // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
      __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));

      // Keep copy of our frame pointer (caller's SP).
      __ mr(r_entryframe_fp, R1_SP);

      BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
      // Push ENTRY_FRAME including arguments:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...

      // calculate frame size

      // unaligned size of arguments
      __ sldi(r_argument_size_in_bytes,
                  r_arg_argument_count, Interpreter::logStackElementSize);
      // arguments alignment (max 1 slot)
      // FIXME: use round_to() here
      __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
      __ sldi(r_frame_alignment_in_bytes,
              r_frame_alignment_in_bytes, Interpreter::logStackElementSize);

      // size = unaligned size of arguments + top abi's size
      __ addi(r_frame_size, r_argument_size_in_bytes,
              frame::top_ijava_frame_abi_size);
      // size += arguments alignment
      __ add(r_frame_size,
             r_frame_size, r_frame_alignment_in_bytes);
      // size += size of call_stub locals
      __ addi(r_frame_size,
              r_frame_size, frame::entry_frame_locals_size);
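      // Worked example (sizes are illustrative, the real constants come from
      // frame_ppc.hpp): with 3 argument words and 8-byte stack elements the
      // argument area is 3*8 = 24 bytes plus one 8-byte alignment slot, so
      // r_frame_size = top_ijava_frame_abi_size + 32 + entry_frame_locals_size.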

      // push ENTRY_FRAME
      __ push_frame(r_frame_size, r_temp);

      // initialize call_stub locals (step 1)
      __ std(r_arg_call_wrapper_addr,
             _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
      __ std(r_arg_result_addr,
             _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ std(r_arg_result_type,
             _entry_frame_locals_neg(result_type), r_entryframe_fp);
      // we will save arguments_tos_address later


      BLOCK_COMMENT("Copy Java arguments");
      // copy Java arguments

      // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
      // FIXME: why not simply use SP+frame::top_ijava_frame_size?
      __ addi(r_top_of_arguments_addr,
              R1_SP, frame::top_ijava_frame_abi_size);
      __ add(r_top_of_arguments_addr,
             r_top_of_arguments_addr, r_frame_alignment_in_bytes);

      // any arguments to copy?
      __ cmpdi(CCR0, r_arg_argument_count, 0);
      __ beq(CCR0, arguments_copied);

      // prepare loop and copy arguments in reverse order
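      // In C terms the copy below is roughly (illustrative sketch only):
      //   intptr_t* src = parameter_block + count - 1;  // last incoming argument
      //   intptr_t* dst = top_of_arguments;             // first outgoing slot
      //   while (count-- > 0) { *dst++ = *src--; }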
      {
        // init CTR with arg_argument_count
        __ mtctr(r_arg_argument_count);

        // let r_argumentcopy_addr point to last outgoing Java arguments
        __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);

        // let r_argument_addr point to last incoming java argument
        __ add(r_argument_addr,
                   r_arg_argument_addr, r_argument_size_in_bytes);
        __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);

        // now loop while CTR > 0 and copy arguments
        {
          Label next_argument;
          __ bind(next_argument);

          __ ld(r_temp, 0, r_argument_addr);
          // argument_addr--;
          __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
          __ std(r_temp, 0, r_argumentcopy_addr);
          // argumentcopy_addr++;
          __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);

          __ bdnz(next_argument);
        }
      }

      // Arguments copied, continue.
      __ bind(arguments_copied);
    }

    {
      BLOCK_COMMENT("Call frame manager or native entry.");
      // Call frame manager or native entry.
      Register r_new_arg_entry = R14;
      assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                 r_arg_method, r_arg_thread);

      __ mr(r_new_arg_entry, r_arg_entry);

      // Register state on entry to frame manager / native entry:
      //
      //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
      //   R19_method  -  Method
      //   R16_thread  -  JavaThread*

      // Tos must point to last argument - element_size.
      const Register tos = R15_esp;

      __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);

      // initialize call_stub locals (step 2)
      // now save tos as arguments_tos_address
      __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);

      // load argument registers for call
      __ mr(R19_method, r_arg_method);
      __ mr(R16_thread, r_arg_thread);
      assert(tos != r_arg_method, "trashed r_arg_method");
      assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");

      // Load the dispatch table base into R25_templateTableBase (simplifies checks in the callee).
      __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
      // Stack on entry to frame manager / native entry:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //

      // global toc register
      __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R11_scratch1);
      // Remember the senderSP so the interpreter can pop c2i arguments off of the stack
      // when called via a c2i.

      // Pass initial_caller_sp to framemanager.
      __ mr(R21_tmp1, R1_SP);

      // Do a light-weight C-call here, r_new_arg_entry holds the address
      // of the interpreter entry point (frame manager or native entry)
      // and save runtime-value of LR in return_address.
      assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
             "trashed r_new_arg_entry");
      return_address = __ call_stub(r_new_arg_entry);
    }

    {
      BLOCK_COMMENT("Returned from frame manager or native entry.");
      // Returned from frame manager or native entry.
      // Now pop frame, process result, and return to caller.

      // Stack on exit from frame manager / native entry:
      //
      //      F0      [ABI]
      //              ...
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //
      // Just pop the topmost frame ...
      //

      Label ret_is_object;
      Label ret_is_long;
      Label ret_is_float;
      Label ret_is_double;

      Register r_entryframe_fp = R30;
      Register r_lr            = R7_ARG5;
      Register r_cr            = R8_ARG6;

      // Reload some volatile registers which we've spilled before the call
      // to frame manager / native entry.
      // Access all locals via frame pointer, because we know nothing about
      // the topmost frame's size.
      __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
      assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
      __ ld(r_arg_result_addr,
            _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ ld(r_arg_result_type,
            _entry_frame_locals_neg(result_type), r_entryframe_fp);
      __ ld(r_cr, _abi(cr), r_entryframe_fp);
      __ ld(r_lr, _abi(lr), r_entryframe_fp);

      // pop frame and restore non-volatiles, LR and CR
      __ mr(R1_SP, r_entryframe_fp);
      __ mtcr(r_cr);
      __ mtlr(r_lr);

      // Store result depending on type. Everything that is not
      // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
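      // In C terms (illustrative sketch only):
      //   switch (result_type) {
      //     case T_OBJECT:
      //     case T_LONG:   *(intptr_t*)result = R3_RET;       break;
      //     case T_FLOAT:  *(jfloat*)result   = F1_RET;       break;
      //     case T_DOUBLE: *(jdouble*)result  = F1_RET;       break;
      //     default:       *(jint*)result     = (jint)R3_RET; break;
      //   }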
      __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
      __ cmpwi(CCR1, r_arg_result_type, T_LONG);
      __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
      __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);

      // restore non-volatile registers
      __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));


      // Stack on exit from call_stub:
      //
      //      0       [C_FRAME]
      //              ...
      //
      //  no call_stub frames left.

      // All non-volatiles have been restored at this point!!
      assert(R3_RET == R3, "R3_RET should be R3");

      __ beq(CCR0, ret_is_object);
      __ beq(CCR1, ret_is_long);
      __ beq(CCR5, ret_is_float);
      __ beq(CCR6, ret_is_double);

      // default:
      __ stw(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_OBJECT:
      __ bind(ret_is_object);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_LONG:
      __ bind(ret_is_long);
      __ std(R3_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_FLOAT:
      __ bind(ret_is_float);
      __ stfs(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller

      // case T_DOUBLE:
      __ bind(ret_is_double);
      __ stfd(F1_RET, 0, r_arg_result_addr);
      __ blr(); // return to caller
    }

    return start;
  }

  // Return point for a Java call if there's an exception thrown in
  // Java code.  The exception is caught and transformed into a
  // pending exception stored in JavaThread that can be tested from
  // within the VM.
  //
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");

    address start = __ pc();

    // Registers alive
    //
    //  R16_thread
    //  R3_ARG1 - address of pending exception
    //  R4_ARG2 - return address in call stub

    const Register exception_file = R21_tmp1;
    const Register exception_line = R22_tmp2;

    __ load_const(exception_file, (void*)__FILE__);
    __ load_const(exception_line, (void*)__LINE__);

    __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
    // store into `char *'
    __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread);
    // store into `int'
    __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);

    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");

    __ mtlr(R4_ARG2);
    // continue in call stub
    __ blr();

    return start;
  }

  // Continuation point for runtime calls returning with a pending
  // exception.  The pending exception check happened in the runtime
  // or native call stub.  The pending exception in Thread is
  // converted into a Java-level exception.
  //
  // Read:
  //
  //   LR:     The pc the runtime library callee wants to return to.
  //           Since the exception occurred in the callee, the return pc
  //           from the point of view of Java is the exception pc.
  //   thread: Needed for method handles.
  //
  // Invalidate:
  //
  //   volatile registers (except below).
  //
  // Update:
  //
  //   R4_ARG2: exception
  //
  // (LR is unchanged and is live out).
  //
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward_exception");
    address start = __ pc();

#if !defined(PRODUCT)
    if (VerifyOops) {
      // Get pending exception oop.
      __ ld(R3_ARG1,
                in_bytes(Thread::pending_exception_offset()),
                R16_thread);
      // Make sure that this code is only executed if there is a pending exception.
      {
        Label L;
        __ cmpdi(CCR0, R3_ARG1, 0);
        __ bne(CCR0, L);
        __ stop("StubRoutines::forward exception: no pending exception (1)");
        __ bind(L);
      }
      __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
    }
#endif

    // Save LR/CR and copy exception pc (LR) into R4_ARG2.
    __ save_LR_CR(R4_ARG2);
    __ push_frame_reg_args(0, R0);
    // Find exception handler.
    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
                     SharedRuntime::exception_handler_for_return_address),
                    R16_thread,
                    R4_ARG2);
    // Copy handler's address.
    __ mtctr(R3_RET);
    __ pop_frame();
    __ restore_LR_CR(R0);

    // Set up the arguments for the exception handler:
    //  - R3_ARG1: exception oop
    //  - R4_ARG2: exception pc.

    // Load pending exception oop.
    __ ld(R3_ARG1,
              in_bytes(Thread::pending_exception_offset()),
              R16_thread);

    // The exception pc is the return address in the caller.
    // Must load it into R4_ARG2.
    __ mflr(R4_ARG2);

#ifdef ASSERT
    // Make sure exception is set.
    {
      Label L;
      __ cmpdi(CCR0, R3_ARG1, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // Clear the pending exception.
    __ li(R0, 0);
    __ std(R0,
               in_bytes(Thread::pending_exception_offset()),
               R16_thread);
    // Jump to exception handler.
    __ bctr();

    return start;
  }

#undef __
#define __ masm->
  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Only callee-saved registers are preserved (through the
  // normal register window / RegisterMap handling).  If the compiler
  // needs all registers to be preserved between the fault point and
  // the exception handler then it must assume responsibility for that
  // in AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  //
  // Note that we generate only this stub into a RuntimeStub, because
  // it needs to be properly traversed and ignored during GC, so we
  // change the meaning of the "__" macro within this method.
  //
  // Note: the routine set_pc_not_at_call_for_caller in
  // SharedRuntime.cpp requires that this code be generated into a
  // RuntimeStub.
  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
                                   Register arg1 = noreg, Register arg2 = noreg) {
    CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
    MacroAssembler* masm = new MacroAssembler(&code);

    OopMapSet* oop_maps  = new OopMapSet();
    int frame_size_in_bytes = frame::abi_reg_args_size;
    OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

    address start = __ pc();

    __ save_LR_CR(R11_scratch1);

    // Push a frame.
    __ push_frame_reg_args(0, R11_scratch1);

    address frame_complete_pc = __ pc();

    if (restore_saved_exception_pc) {
      __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
    }

    // Note that we always have a runtime stub frame on the top of
    // stack by this point. Remember the offset of the instruction
    // whose address will be moved to R11_scratch1.
    address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);

    __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

    __ mr(R3_ARG1, R16_thread);
    if (arg1 != noreg) {
      __ mr(R4_ARG2, arg1);
    }
    if (arg2 != noreg) {
      __ mr(R5_ARG3, arg2);
    }
#if defined(ABI_ELFv2)
    __ call_c(runtime_entry, relocInfo::none);
#else
    __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif

    // Set an oopmap for the call site.
    oop_maps->add_gc_map((int)(gc_map_pc - start), map);

    __ reset_last_Java_frame();

#ifdef ASSERT
    // Make sure that this code is only executed if there is a pending
    // exception.
    {
      Label L;
      __ ld(R0,
                in_bytes(Thread::pending_exception_offset()),
                R16_thread);
      __ cmpdi(CCR0, R0, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::throw_exception: no pending exception");
      __ bind(L);
    }
#endif

    // Pop frame.
    __ pop_frame();

    __ restore_LR_CR(R11_scratch1);

    __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
    __ mtctr(R11_scratch1);
    __ bctr();

    // Create runtime stub with OopMap.
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name, &code,
                                    /*frame_complete=*/ (int)(frame_complete_pc - start),
                                    frame_size_in_bytes/wordSize,
                                    oop_maps,
                                    false);
    return stub->entry_point();
  }
#undef __
#define __ _masm->

  //  Generate G1 pre-write barrier for array.
  //
  //  Input:
  //     from     - register containing src address (only needed for spilling)
  //     to       - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  Kills:
  //     nothing
  //
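  //  For the G1 case the code below does roughly (illustrative sketch only;
  //  satb_mark_queue_active stands for the _active flag check in the code):
  //     if (!dest_uninitialized && satb_mark_queue_active(thread)) {
  //       // spill from/to/count (+ optional preserves), then:
  //       BarrierSet::static_write_ref_array_pre(to, count);
  //     }
  //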
  void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1,
                                       Register preserve1 = noreg, Register preserve2 = noreg) {
    BarrierSet* const bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized.
        if (!dest_uninitialized) {
          int spill_slots = 3;
          if (preserve1 != noreg) { spill_slots++; }
          if (preserve2 != noreg) { spill_slots++; }
          const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
          Label filtered;

          // Is marking active?
          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
            __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
          } else {
            guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
            __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
          }
          __ cmpdi(CCR0, Rtmp1, 0);
          __ beq(CCR0, filtered);

          __ save_LR_CR(R0);
          __ push_frame(frame_size, R0);
          int slot_nr = 0;
          __ std(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
          __ std(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
          __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
          if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
          if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }

          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);

          slot_nr = 0;
          __ ld(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
          __ ld(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
          __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
          if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
          if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
          __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
          __ restore_LR_CR(R0);

          __ bind(filtered);
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }

  //  Generate CMS/G1 post-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  The input registers and R0 are overwritten.
  //
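  //  For the card-table kinds the code below dirties all cards spanned by the
  //  stored range; in C terms roughly (illustrative sketch only, dirty value is 0):
  //     jbyte* first = byte_map_base + (addr >> card_shift);
  //     jbyte* last  = byte_map_base + ((addr + count * BytesPerHeapOop - 1) >> card_shift);
  //     for (jbyte* p = first; p <= last; p++) { *p = 0; }
  //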
  void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, Register preserve = noreg) {
    BarrierSet* const bs = Universe::heap()->barrier_set();

    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          int spill_slots = (preserve != noreg) ? 1 : 0;
          const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);

          __ save_LR_CR(R0);
          __ push_frame(frame_size, R0);
          if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
          if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
          __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
          __ restore_LR_CR(R0);
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
        {
          Label Lskip_loop, Lstore_loop;
          if (UseConcMarkSweepGC) {
            // TODO PPC port: contribute optimization / requires shared changes
            __ release();
          }

          CardTableModRefBS* const ct = barrier_set_cast<CardTableModRefBS>(bs);
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(addr, count, tmp);

          __ sldi(count, count, LogBytesPerHeapOop);
          __ addi(count, count, -BytesPerHeapOop);
          __ add(count, addr, count);
          // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
          __ srdi(addr, addr, CardTableModRefBS::card_shift);
          __ srdi(count, count, CardTableModRefBS::card_shift);
          __ subf(count, addr, count);
          assert_different_registers(R0, addr, count, tmp);
          __ load_const(tmp, (address)ct->byte_map_base);
          __ addic_(count, count, 1);
          __ beq(CCR0, Lskip_loop);
          __ li(R0, 0);
          __ mtctr(count);
          // Byte store loop
          __ bind(Lstore_loop);
          __ stbx(R0, tmp, addr);
          __ addi(addr, addr, 1);
          __ bdnz(Lstore_loop);
          __ bind(Lskip_loop);
        }
        break;
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Support for void zero_words_aligned8(HeapWord* to, size_t count)
  //
  // Arguments:
  //   to:    R3_ARG1, address of the first word to clear (must be 8-byte aligned)
  //   count: R4_ARG2, number of 8-byte words to clear
  //
  // Destroys:
  //   R3-R7, CTR, CCR0, CCR1
  //
  address generate_zero_words_aligned8() {
    StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");

    // Implemented as in ClearArray.
    address start = __ function_entry();

    Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
    Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
    Register tmp1_reg       = R5_ARG3;
    Register tmp2_reg       = R6_ARG4;
    Register zero_reg       = R7_ARG5;

    // Procedure for large arrays (uses data cache block zero instruction).
    Label dwloop, fast, fastloop, restloop, lastdword, done;
    int cl_size = VM_Version::L1_data_cache_line_size();
    int cl_dwords = cl_size >> 3;
    int cl_dwordaddr_bits = exact_log2(cl_dwords);
    int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
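    // E.g., with a 128-byte L1 data cache line (typical for recent POWER, but
    // the value is taken from VM_Version at runtime): cl_dwords = 16 and
    // cl_dwordaddr_bits = 4.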

    // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
    __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
    __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
    __ load_const_optimized(zero_reg, 0L);      // Use as zero register.

    __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
    __ beq(CCR0, lastdword);                    // size <= 1
    __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
    __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
    __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000

    __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
    __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.

    __ beq(CCR0, fast);                         // already 128byte aligned
    __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
    __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)

    // Clear in first cache line dword-by-dword if not already 128byte aligned.
    __ bind(dwloop);
      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, 8);
    __ bdnz(dwloop);

    // clear 128byte blocks
    __ bind(fast);
    __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even

    __ mtctr(tmp1_reg);                         // load counter
    __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
    __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords

    __ bind(fastloop);
      __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, cl_size);
    __ bdnz(fastloop);

    //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
    __ beq(CCR0, lastdword);                    // rest<=1
    __ mtctr(tmp1_reg);                         // load counter

    // Clear rest.
    __ bind(restloop);
      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
      __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, 16);
    __ bdnz(restloop);

    __ bind(lastdword);
    __ beq(CCR1, done);
    __ std(zero_reg, 0, base_ptr_reg);
    __ bind(done);
    __ blr();                                   // return

    return start;
  }

#if !defined(PRODUCT)
  // Wrapper which calls oopDesc::is_oop_or_null()
  // Only called by MacroAssembler::verify_oop
  static void verify_oop_helper(const char* message, oop o) {
    if (!o->is_oop_or_null()) {
      fatal("%s", message);
    }
    ++ StubRoutines::_verify_oop_count;
  }
#endif

  // Return address of code to be called from code generated by
  // MacroAssembler::verify_oop.
  //
  // Don't generate, rather use C++ code.
  address generate_verify_oop() {
    // this is actually a `FunctionDescriptor*'.
    address start = 0;

#if !defined(PRODUCT)
    start = CAST_FROM_FN_PTR(address, verify_oop_helper);
#endif

    return start;
  }

  // Fairer handling of safepoints for native methods.
  //
  // Generate code which reads from the polling page. This special handling is needed as the
  // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
  // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
  // to read from the safepoint polling page.
  address generate_load_from_poll() {
    StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
    address start = __ function_entry();
    __ unimplemented("StubRoutines::verify_oop", 95);  // TODO PPC port
    return start;
  }

  // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
  //
  // The code is implemented (ported from sparc) as we believe it benefits JVM98; however,
  // tracing (-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all!
  //
  // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
  // for turning on loop predication optimization, and hence the behavior of "array range check"
  // and "loop invariant check" could be influenced, which potentially boosted JVM98.
  //
  // Generate stub for disjoint array fill (byte, short, or int). If "aligned" is true,
  // the "to" address is assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   to:    R3_ARG1
  //   value: R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
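  // The fill value is replicated to 64 bits before the wide store loops; e.g.
  // for T_BYTE value 0xAB: 0xAB -> 0xABAB -> 0xABABABAB -> 0xABABABABABABABAB.
  //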
  address generate_fill(BasicType t, bool aligned, const char* name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    const Register to    = R3_ARG1;   // destination array address
    const Register value = R4_ARG2;   // fill value
    const Register count = R5_ARG3;   // elements count
    const Register temp  = R6_ARG4;   // temp register

    //assert_clean_int(count, O3);    // Make sure 'count' is clean int.

    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
    Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;

    int shift = -1;
    switch (t) {
      case T_BYTE:
        shift = 2;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        break;
      case T_SHORT:
        shift = 1;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        break;
      case T_INT:
        shift = 0;
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_4_bytes);
        break;
      default: ShouldNotReachHere();
    }

    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
      // Align source address at 4 bytes address boundary.
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays.
        __ andi_(temp, to, 1);
        __ beq(CCR0, L_skip_align1);
        __ stb(value, 0, to);
        __ addi(to, to, 1);
        __ addi(count, count, -1);
        __ bind(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays.
      __ andi_(temp, to, 2);
      __ beq(CCR0, L_skip_align2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ addi(count, count, -(1 << (shift - 1)));
      __ bind(L_skip_align2);
    }

    if (!aligned) {
      // Align to 8 bytes, we know we are 4 byte aligned to start.
      __ andi_(temp, to, 7);
      __ beq(CCR0, L_fill_32_bytes);
      __ stw(value, 0, to);
      __ addi(to, to, 4);
      __ addi(count, count, -(1 << shift));
      __ bind(L_fill_32_bytes);
    }

    __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
    // Clone bytes int->long as above.
    __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit

    Label L_check_fill_8_bytes;
    // Fill 32-byte chunks.
    __ subf_(count, temp, count);
    __ blt(CCR0, L_check_fill_8_bytes);

    Label L_fill_32_bytes_loop;
    __ align(32);
    __ bind(L_fill_32_bytes_loop);

    __ std(value, 0, to);
    __ std(value, 8, to);
    __ subf_(count, temp, count);           // Update count.
    __ std(value, 16, to);
    __ std(value, 24, to);

    __ addi(to, to, 32);
    __ bge(CCR0, L_fill_32_bytes_loop);

    __ bind(L_check_fill_8_bytes);
    __ add_(count, temp, count);
    __ beq(CCR0, L_exit);
    __ addic_(count, count, -(2 << shift));
    __ blt(CCR0, L_fill_4_bytes);

    //
    // Length is too short, just fill 8 bytes at a time.
    //
    Label L_fill_8_bytes_loop;
    __ bind(L_fill_8_bytes_loop);
    __ std(value, 0, to);
    __ addic_(count, count, -(2 << shift));
    __ addi(to, to, 8);
    __ bge(CCR0, L_fill_8_bytes_loop);

    // Fill trailing 4 bytes.
    __ bind(L_fill_4_bytes);
    __ andi_(temp, count, 1<<shift);
    __ beq(CCR0, L_fill_2_bytes);

    __ stw(value, 0, to);
    if (t == T_BYTE || t == T_SHORT) {
      __ addi(to, to, 4);
      // Fill trailing 2 bytes.
      __ bind(L_fill_2_bytes);
      __ andi_(temp, count, 1<<(shift-1));
      __ beq(CCR0, L_fill_byte);
      __ sth(value, 0, to);
      if (t == T_BYTE) {
        __ addi(to, to, 2);
        // Fill trailing byte.
        __ bind(L_fill_byte);
        __ andi_(count, count, 1);
        __ beq(CCR0, L_exit);
        __ stb(value, 0, to);
      } else {
        __ bind(L_fill_byte);
      }
    } else {
      __ bind(L_fill_2_bytes);
    }
    __ bind(L_exit);
    __ blr();

    // Handle copies less than 8 bytes. Int is handled elsewhere.
    if (t == T_BYTE) {
      __ bind(L_fill_elements);
      Label L_fill_2, L_fill_4;
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ stb(value, 0, to);
      __ addi(to, to, 1);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_fill_4);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ addi(to, to, 2);
      __ bind(L_fill_4);
      __ andi_(temp, count, 4);
      __ beq(CCR0, L_exit);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ stb(value, 2, to);
      __ stb(value, 3, to);
      __ blr();
    }

    if (t == T_SHORT) {
      Label L_fill_2;
      __ bind(L_fill_elements);
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_exit);
      __ sth(value, 0, to);
      __ sth(value, 2, to);
      __ blr();
    }
    return start;
  }

  inline void assert_positive_int(Register count) {
#ifdef ASSERT
    __ srdi_(R0, count, 31);
    __ asm_assert_eq("missing zero extend", 0xAFFE);
#endif
  }

  // Generate overlap test for array copy stubs.
  //
  // Input:
  //   R3_ARG1    -  from
  //   R4_ARG2    -  to
  //   R5_ARG3    -  element count
  //
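  // Restating the test below: a backward copy is needed iff 'from' precedes
  // 'to' and the distance (to - from) is smaller than count << log2_elem_size;
  // otherwise control branches to no_overlap_target (forward copy).
  //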
1076252206Sdavidcs  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
1077252206Sdavidcs    Register tmp1 = R6_ARG4;
1078252206Sdavidcs    Register tmp2 = R7_ARG5;
1079252206Sdavidcs
1080252206Sdavidcs    assert_positive_int(R5_ARG3);
1081252206Sdavidcs
1082252206Sdavidcs    __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
1083252206Sdavidcs    __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
1084252206Sdavidcs    __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
1085252206Sdavidcs    __ cmpld(CCR1, tmp1, tmp2);
1086252206Sdavidcs    __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
1087252206Sdavidcs    // Overlaps if src lies before dst and the distance is smaller than the size.
1088252206Sdavidcs    // Branch to the forward copy routine otherwise (within range of 32kB).
1089252206Sdavidcs    __ bc(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::less), no_overlap_target);
1090252206Sdavidcs
1091252206Sdavidcs    // need to copy backwards
1092252206Sdavidcs  }
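
  // Illustrative C++ sketch of the overlap condition tested above (stand-alone, names
  // made up): a backward copy is only required when the source lies below the
  // destination and the distance between them is smaller than the copied size,
  // both compared as unsigned values.
  static bool sketch_needs_backward_copy(const void* from, const void* to,
                                         size_t count, int log2_elem_size) {
    unsigned long distance = (unsigned long)to - (unsigned long)from;  // subf(tmp1, from, to)
    unsigned long size     = (unsigned long)count << log2_elem_size;   // sldi(tmp2, count, log2)
    return ((unsigned long)from < (unsigned long)to) && (distance < size);
  }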
1093252206Sdavidcs
1094252206Sdavidcs  // The guideline in the implementations of generate_disjoint_xxx_copy
1095252206Sdavidcs  // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
1096252206Sdavidcs  // single instructions, but to avoid alignment interrupts (see subsequent
1097252206Sdavidcs  // comment). Furthermore, we try to minimize misaligned accesses, even
1098252206Sdavidcs  // though they cause no alignment interrupt.
1099252206Sdavidcs  //
1100252206Sdavidcs  // In Big-Endian mode, the PowerPC architecture requires implementations to
1101252206Sdavidcs  // handle automatically misaligned integer halfword and word accesses,
1102252206Sdavidcs  // word-aligned integer doubleword accesses, and word-aligned floating-point
1103252206Sdavidcs  // accesses. Other accesses may or may not generate an Alignment interrupt
1104252206Sdavidcs  // depending on the implementation.
1105252206Sdavidcs  // Alignment interrupt handling may require on the order of hundreds of cycles,
1106252206Sdavidcs  // so every effort should be made to avoid misaligned memory accesses.
1107252206Sdavidcs  //
1108252206Sdavidcs  //
1109252206Sdavidcs  // Generate stub for disjoint byte copy.  If "aligned" is true, the
1110252206Sdavidcs  // "from" and "to" addresses are assumed to be heapword aligned.
1111252206Sdavidcs  //
1112252206Sdavidcs  // Arguments for generated stub:
1113252206Sdavidcs  //      from:  R3_ARG1
1114252206Sdavidcs  //      to:    R4_ARG2
1115252206Sdavidcs  //      count: R5_ARG3 treated as signed
1116252206Sdavidcs  //
1117252206Sdavidcs  address generate_disjoint_byte_copy(bool aligned, const char * name) {
1118252206Sdavidcs    StubCodeMark mark(this, "StubRoutines", name);
1119252206Sdavidcs    address start = __ function_entry();
1120252206Sdavidcs    assert_positive_int(R5_ARG3);
1121252206Sdavidcs
1122252206Sdavidcs    Register tmp1 = R6_ARG4;
1123252206Sdavidcs    Register tmp2 = R7_ARG5;
1124252206Sdavidcs    Register tmp3 = R8_ARG6;
1125252206Sdavidcs    Register tmp4 = R9_ARG7;
1126252206Sdavidcs
1127252206Sdavidcs    VectorSRegister tmp_vsr1  = VSR1;
1128252206Sdavidcs    VectorSRegister tmp_vsr2  = VSR2;
1129252206Sdavidcs
1130252206Sdavidcs    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9, l_10;
1131252206Sdavidcs
1132252206Sdavidcs    // Don't try anything fancy if arrays don't have many elements.
1133252206Sdavidcs    __ li(tmp3, 0);
1134252206Sdavidcs    __ cmpwi(CCR0, R5_ARG3, 17);
1135252206Sdavidcs    __ ble(CCR0, l_6); // copy 4 at a time
1136252206Sdavidcs
1137252206Sdavidcs    if (!aligned) {
1138252206Sdavidcs      __ xorr(tmp1, R3_ARG1, R4_ARG2);
1139252206Sdavidcs      __ andi_(tmp1, tmp1, 3);
1140252206Sdavidcs      __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.
1141252206Sdavidcs
1142252206Sdavidcs      // Copy elements if necessary to align to 4 bytes.
1143252206Sdavidcs      __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
1144252206Sdavidcs      __ andi_(tmp1, tmp1, 3);
1145252206Sdavidcs      __ beq(CCR0, l_2);
1146252206Sdavidcs
1147252206Sdavidcs      __ subf(R5_ARG3, tmp1, R5_ARG3);
1148252206Sdavidcs      __ bind(l_9);
1149252206Sdavidcs      __ lbz(tmp2, 0, R3_ARG1);
1150252206Sdavidcs      __ addic_(tmp1, tmp1, -1);
1151252206Sdavidcs      __ stb(tmp2, 0, R4_ARG2);
1152252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 1);
1153252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 1);
1154252206Sdavidcs      __ bne(CCR0, l_9);
1155252206Sdavidcs
1156252206Sdavidcs      __ bind(l_2);
1157252206Sdavidcs    }
1158252206Sdavidcs
1159252206Sdavidcs    // copy 8 elements at a time
1160252206Sdavidcs    __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
1161252206Sdavidcs    __ andi_(tmp1, tmp2, 7);
1162252206Sdavidcs    __ bne(CCR0, l_7); // Not the same alignment mod 8 -> one of to/from is already 8-byte aligned -> copy 8 bytes per load.
1163252206Sdavidcs
1164252206Sdavidcs    // copy a 2-element word if necessary to align to 8 bytes
1165252206Sdavidcs    __ andi_(R0, R3_ARG1, 7);
1166252206Sdavidcs    __ beq(CCR0, l_7);
1167252206Sdavidcs
1168252206Sdavidcs    __ lwzx(tmp2, R3_ARG1, tmp3);
1169252206Sdavidcs    __ addi(R5_ARG3, R5_ARG3, -4);
1170252206Sdavidcs    __ stwx(tmp2, R4_ARG2, tmp3);
1171252206Sdavidcs    { // FasterArrayCopy
1172252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 4);
1173252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 4);
1174252206Sdavidcs    }
1175252206Sdavidcs    __ bind(l_7);
1176252206Sdavidcs
1177252206Sdavidcs    { // FasterArrayCopy
1178252206Sdavidcs      __ cmpwi(CCR0, R5_ARG3, 31);
1179252206Sdavidcs      __ ble(CCR0, l_6); // copy 4 at a time if less than 32 elements remain
1180252206Sdavidcs
1181252206Sdavidcs      __ srdi(tmp1, R5_ARG3, 5);
1182252206Sdavidcs      __ andi_(R5_ARG3, R5_ARG3, 31);
1183252206Sdavidcs      __ mtctr(tmp1);
1184252206Sdavidcs
1185252206Sdavidcs     if (!VM_Version::has_vsx()) {
1186252206Sdavidcs
1187252206Sdavidcs      __ bind(l_8);
1188252206Sdavidcs      // Use unrolled version for mass copying (copy 32 elements at a time).
1189252206Sdavidcs      // Load feeding store gets zero latency on Power6, however not on Power5.
1190252206Sdavidcs      // Therefore, the following sequence is made for the good of both.
1191252206Sdavidcs      __ ld(tmp1, 0, R3_ARG1);
1192252206Sdavidcs      __ ld(tmp2, 8, R3_ARG1);
1193252206Sdavidcs      __ ld(tmp3, 16, R3_ARG1);
1194252206Sdavidcs      __ ld(tmp4, 24, R3_ARG1);
1195252206Sdavidcs      __ std(tmp1, 0, R4_ARG2);
1196252206Sdavidcs      __ std(tmp2, 8, R4_ARG2);
1197252206Sdavidcs      __ std(tmp3, 16, R4_ARG2);
1198252206Sdavidcs      __ std(tmp4, 24, R4_ARG2);
1199252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 32);
1200252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 32);
1201252206Sdavidcs      __ bdnz(l_8);
1202252206Sdavidcs
1203252206Sdavidcs    } else { // Processor supports VSX, so use it to mass copy.
1204252206Sdavidcs
1205252206Sdavidcs      // Prefetch the data into the L2 cache.
1206252206Sdavidcs      __ dcbt(R3_ARG1, 0);
1207252206Sdavidcs
1208252206Sdavidcs      // If supported set DSCR pre-fetch to deepest.
1209252206Sdavidcs      if (VM_Version::has_mfdscr()) {
1210252206Sdavidcs        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1211252206Sdavidcs        __ mtdscr(tmp2);
1212252206Sdavidcs      }
1213252206Sdavidcs
1214252206Sdavidcs      __ li(tmp1, 16);
1215252206Sdavidcs
1216252206Sdavidcs      // Align the backbranch target to 32 bytes rather than just 16: the
1217252206Sdavidcs      // loop contains fewer than 8 instructions, so it fits entirely in a
1218252206Sdavidcs      // single 32-byte i-cache sector.
1219252206Sdavidcs      __ align(32);
1220252206Sdavidcs
1221252206Sdavidcs      __ bind(l_10);
1222252206Sdavidcs      // Use loop with VSX load/store instructions to
1223252206Sdavidcs      // copy 32 elements at a time.
1224252206Sdavidcs      __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1225252206Sdavidcs      __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1226252206Sdavidcs      __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1227252206Sdavidcs      __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1228252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1229252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
1230252206Sdavidcs      __ bdnz(l_10);                       // Dec CTR and loop if not zero.
1231252206Sdavidcs
1232252206Sdavidcs      // Restore DSCR pre-fetch value.
1233252206Sdavidcs      if (VM_Version::has_mfdscr()) {
1234252206Sdavidcs        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1235252206Sdavidcs        __ mtdscr(tmp2);
1236252206Sdavidcs      }
1237252206Sdavidcs
1238252206Sdavidcs    } // VSX
1239252206Sdavidcs   } // FasterArrayCopy
1240252206Sdavidcs
1241252206Sdavidcs    __ bind(l_6);
1242252206Sdavidcs
1243252206Sdavidcs    // copy 4 elements at a time
1244252206Sdavidcs    __ cmpwi(CCR0, R5_ARG3, 4);
1245252206Sdavidcs    __ blt(CCR0, l_1);
1246252206Sdavidcs    __ srdi(tmp1, R5_ARG3, 2);
1247252206Sdavidcs    __ mtctr(tmp1); // is > 0
1248252206Sdavidcs    __ andi_(R5_ARG3, R5_ARG3, 3);
1249252206Sdavidcs
1250252206Sdavidcs    { // FasterArrayCopy
1251252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, -4);
1252252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, -4);
1253252206Sdavidcs      __ bind(l_3);
1254252206Sdavidcs      __ lwzu(tmp2, 4, R3_ARG1);
1255252206Sdavidcs      __ stwu(tmp2, 4, R4_ARG2);
1256252206Sdavidcs      __ bdnz(l_3);
1257252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 4);
1258252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 4);
1259252206Sdavidcs    }
1260252206Sdavidcs
1261252206Sdavidcs    // do single element copy
1262252206Sdavidcs    __ bind(l_1);
1263252206Sdavidcs    __ cmpwi(CCR0, R5_ARG3, 0);
1264252206Sdavidcs    __ beq(CCR0, l_4);
1265252206Sdavidcs
1266252206Sdavidcs    { // FasterArrayCopy
1267252206Sdavidcs      __ mtctr(R5_ARG3);
1268252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, -1);
1269252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, -1);
1270252206Sdavidcs
1271252206Sdavidcs      __ bind(l_5);
1272252206Sdavidcs      __ lbzu(tmp2, 1, R3_ARG1);
1273252206Sdavidcs      __ stbu(tmp2, 1, R4_ARG2);
1274252206Sdavidcs      __ bdnz(l_5);
1275252206Sdavidcs    }
1276252206Sdavidcs
1277252206Sdavidcs    __ bind(l_4);
1278252206Sdavidcs    __ li(R3_RET, 0); // return 0
1279252206Sdavidcs    __ blr();
1280252206Sdavidcs
1281252206Sdavidcs    return start;
1282252206Sdavidcs  }
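
  // Rough C++ sketch of the strategy of the stub above (scalar only; the 8-byte
  // co-alignment step and the VSX loop are omitted; all names are made up): align to
  // a 4-byte boundary with single bytes when profitable, then move 32-byte chunks,
  // then 4-byte chunks, and finish the tail byte by byte.
  static void sketch_disjoint_byte_copy(const unsigned char* from, unsigned char* to, size_t count) {
    bool co_aligned = ((((unsigned long)from ^ (unsigned long)to) & 3) == 0);
    if (count > 17 && co_aligned) {
      size_t lead = (-(unsigned long)from) & 3;  // bytes up to the next 4-byte boundary
      count -= lead;
      while (lead-- > 0) *to++ = *from++;
      while (count >= 32) {                      // unrolled main loop, 32 bytes per pass
        for (int i = 0; i < 32; i++) to[i] = from[i];
        from += 32; to += 32; count -= 32;
      }
    }
    while (count >= 4) {                         // 4 bytes at a time
      to[0] = from[0]; to[1] = from[1]; to[2] = from[2]; to[3] = from[3];
      from += 4; to += 4; count -= 4;
    }
    while (count-- > 0) *to++ = *from++;         // single-byte tail
  }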
1283252206Sdavidcs
1284252206Sdavidcs  // Generate stub for conjoint byte copy.  If "aligned" is true, the
1285252206Sdavidcs  // "from" and "to" addresses are assumed to be heapword aligned.
1286252206Sdavidcs  //
1287252206Sdavidcs  // Arguments for generated stub:
1288252206Sdavidcs  //      from:  R3_ARG1
1289252206Sdavidcs  //      to:    R4_ARG2
1290252206Sdavidcs  //      count: R5_ARG3 treated as signed
1291252206Sdavidcs  //
1292252206Sdavidcs  address generate_conjoint_byte_copy(bool aligned, const char * name) {
1293252206Sdavidcs    StubCodeMark mark(this, "StubRoutines", name);
1294252206Sdavidcs    address start = __ function_entry();
1295252206Sdavidcs    assert_positive_int(R5_ARG3);
1296252206Sdavidcs
1297252206Sdavidcs    Register tmp1 = R6_ARG4;
1298252206Sdavidcs    Register tmp2 = R7_ARG5;
1299252206Sdavidcs    Register tmp3 = R8_ARG6;
1300252206Sdavidcs
1301252206Sdavidcs    address nooverlap_target = aligned ?
1302252206Sdavidcs      STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) :
1303252206Sdavidcs      STUB_ENTRY(jbyte_disjoint_arraycopy);
1304252206Sdavidcs
1305252206Sdavidcs    array_overlap_test(nooverlap_target, 0);
1306252206Sdavidcs    // Do reverse copy. We assume the case of actual overlap is rare enough
1307252206Sdavidcs    // that we don't have to optimize it.
1308252206Sdavidcs    Label l_1, l_2;
1309252206Sdavidcs
1310252206Sdavidcs    __ b(l_2);
1311252206Sdavidcs    __ bind(l_1);
1312252206Sdavidcs    __ stbx(tmp1, R4_ARG2, R5_ARG3);
1313252206Sdavidcs    __ bind(l_2);
1314252206Sdavidcs    __ addic_(R5_ARG3, R5_ARG3, -1);
1315252206Sdavidcs    __ lbzx(tmp1, R3_ARG1, R5_ARG3);
1316252206Sdavidcs    __ bge(CCR0, l_1);
1317252206Sdavidcs
1318252206Sdavidcs    __ li(R3_RET, 0); // return 0
1319252206Sdavidcs    __ blr();
1320252206Sdavidcs
1321252206Sdavidcs    return start;
1322252206Sdavidcs  }
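
  // Illustrative C++ sketch of the reverse copy above (names made up): indexing down
  // from the last element guarantees that an overlapping destination never overwrites
  // bytes that still need to be read.
  static void sketch_conjoint_byte_copy(const unsigned char* from, unsigned char* to, long count) {
    for (long i = count - 1; i >= 0; i--) {
      to[i] = from[i];                 // highest addresses are copied first
    }
  }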
1323252206Sdavidcs
1324252206Sdavidcs  // Generate stub for disjoint short copy.  If "aligned" is true, the
1325252206Sdavidcs  // "from" and "to" addresses are assumed to be heapword aligned.
1326252206Sdavidcs  //
1327252206Sdavidcs  // Arguments for generated stub:
1328252206Sdavidcs  //      from:  R3_ARG1
1329252206Sdavidcs  //      to:    R4_ARG2
1330252206Sdavidcs  //  elm.count: R5_ARG3 treated as signed
1331252206Sdavidcs  //
1332252206Sdavidcs  // Strategy for aligned==true:
1333252206Sdavidcs  //
1334252206Sdavidcs  //  If length <= 9:
1335252206Sdavidcs  //     1. copy 2 elements at a time (l_6)
1336252206Sdavidcs  //     2. copy last element if original element count was odd (l_1)
1337252206Sdavidcs  //
1338252206Sdavidcs  //  If length > 9:
1339252206Sdavidcs  //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
1340252206Sdavidcs  //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
1341252206Sdavidcs  //     3. copy last element if one was left in step 2. (l_1)
1342252206Sdavidcs  //
1343252206Sdavidcs  //
1344252206Sdavidcs  // Strategy for aligned==false:
1345252206Sdavidcs  //
1346252206Sdavidcs  //  If length <= 9: same as aligned==true case, but NOTE: load/stores
1347252206Sdavidcs  //                  can be unaligned (see comment below)
1348252206Sdavidcs  //
1349252206Sdavidcs  //  If length > 9:
1350252206Sdavidcs  //     1. continue with step 6. if the alignment of from and to mod 4
1351252206Sdavidcs  //        is different.
1352252206Sdavidcs  //     2. align from and to to 4 bytes by copying 1 element if necessary
1353252206Sdavidcs  //     3. at l_2 from and to are 4 byte aligned; continue with
1354252206Sdavidcs  //        5. if they cannot be aligned to 8 bytes because they have
1355252206Sdavidcs  //        got different alignment mod 8.
1356252206Sdavidcs  //     4. at this point we know that both from and to have the same
1357252206Sdavidcs  //        alignment mod 8, now copy one element if necessary to get
1358252206Sdavidcs  //        8 byte alignment of from and to.
1359252206Sdavidcs  //     5. copy 4 elements at a time until less than 4 elements are
1360252206Sdavidcs  //        left; depending on step 3. all load/stores are aligned or
1361252206Sdavidcs  //        either all loads or all stores are unaligned.
1362252206Sdavidcs  //     6. copy 2 elements at a time until less than 2 elements are
1363252206Sdavidcs  //        left (l_6); arriving here from step 1., there is a chance
1364252206Sdavidcs  //        that all accesses are unaligned.
1365252206Sdavidcs  //     7. copy last element if one was left in step 6. (l_1)
1366252206Sdavidcs  //
1367252206Sdavidcs  //  There are unaligned data accesses using integer load/store
1368252206Sdavidcs  //  instructions in this stub. POWER allows such accesses.
1369252206Sdavidcs  //
1370252206Sdavidcs  //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
1371252206Sdavidcs  //  Chapter 2: Effect of Operand Placement on Performance) unaligned
1372252206Sdavidcs  //  integer load/stores have good performance. Only unaligned
1373252206Sdavidcs  //  floating point load/stores can have poor performance.
1374252206Sdavidcs  //
1375252206Sdavidcs  //  TODO:
1376252206Sdavidcs  //
1377252206Sdavidcs  //  1. check if aligning the backbranch target of loops is beneficial
1378252206Sdavidcs  //
1379252206Sdavidcs  address generate_disjoint_short_copy(bool aligned, const char * name) {
1380252206Sdavidcs    StubCodeMark mark(this, "StubRoutines", name);
1381252206Sdavidcs
1382252206Sdavidcs    Register tmp1 = R6_ARG4;
1383252206Sdavidcs    Register tmp2 = R7_ARG5;
1384252206Sdavidcs    Register tmp3 = R8_ARG6;
1385252206Sdavidcs    Register tmp4 = R9_ARG7;
1386252206Sdavidcs
1387252206Sdavidcs    VectorSRegister tmp_vsr1  = VSR1;
1388252206Sdavidcs    VectorSRegister tmp_vsr2  = VSR2;
1389252206Sdavidcs
1390252206Sdavidcs    address start = __ function_entry();
1391252206Sdavidcs    assert_positive_int(R5_ARG3);
1392252206Sdavidcs
1393252206Sdavidcs    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
1394252206Sdavidcs
1395252206Sdavidcs    // don't try anything fancy if arrays don't have many elements
1396252206Sdavidcs    __ li(tmp3, 0);
1397252206Sdavidcs    __ cmpwi(CCR0, R5_ARG3, 9);
1398252206Sdavidcs    __ ble(CCR0, l_6); // copy 2 at a time
1399252206Sdavidcs
1400252206Sdavidcs    if (!aligned) {
1401252206Sdavidcs      __ xorr(tmp1, R3_ARG1, R4_ARG2);
1402252206Sdavidcs      __ andi_(tmp1, tmp1, 3);
1403252206Sdavidcs      __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy
1404252206Sdavidcs
1405252206Sdavidcs      // At this point it is guaranteed that both from and to have the same alignment mod 4.
1406252206Sdavidcs
1407252206Sdavidcs      // Copy 1 element if necessary to align to 4 bytes.
1408252206Sdavidcs      __ andi_(tmp1, R3_ARG1, 3);
1409252206Sdavidcs      __ beq(CCR0, l_2);
1410252206Sdavidcs
1411252206Sdavidcs      __ lhz(tmp2, 0, R3_ARG1);
1412252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 2);
1413252206Sdavidcs      __ sth(tmp2, 0, R4_ARG2);
1414252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 2);
1415252206Sdavidcs      __ addi(R5_ARG3, R5_ARG3, -1);
1416252206Sdavidcs      __ bind(l_2);
1417252206Sdavidcs
1418252206Sdavidcs      // At this point both from and to are at least 4-byte aligned.
1419252206Sdavidcs
1420252206Sdavidcs      // Copy 4 elements at a time.
1421252206Sdavidcs      // Align to 8 bytes, but only if both from and to have the same alignment mod 8.
1422252206Sdavidcs      __ xorr(tmp2, R3_ARG1, R4_ARG2);
1423252206Sdavidcs      __ andi_(tmp1, tmp2, 7);
1424252206Sdavidcs      __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned
1425252206Sdavidcs
1426252206Sdavidcs      // Copy a 2-element word if necessary to align to 8 bytes.
1427252206Sdavidcs      __ andi_(R0, R3_ARG1, 7);
1428252206Sdavidcs      __ beq(CCR0, l_7);
1429252206Sdavidcs
1430252206Sdavidcs      __ lwzx(tmp2, R3_ARG1, tmp3);
1431252206Sdavidcs      __ addi(R5_ARG3, R5_ARG3, -2);
1432252206Sdavidcs      __ stwx(tmp2, R4_ARG2, tmp3);
1433252206Sdavidcs      { // FasterArrayCopy
1434252206Sdavidcs        __ addi(R3_ARG1, R3_ARG1, 4);
1435252206Sdavidcs        __ addi(R4_ARG2, R4_ARG2, 4);
1436252206Sdavidcs      }
1437252206Sdavidcs    }
1438252206Sdavidcs
1439252206Sdavidcs    __ bind(l_7);
1440252206Sdavidcs
1441252206Sdavidcs    // Copy 4 elements at a time; either the loads or the stores can
1442252206Sdavidcs    // be unaligned if aligned == false.
1443252206Sdavidcs
1444252206Sdavidcs    { // FasterArrayCopy
1445252206Sdavidcs      __ cmpwi(CCR0, R5_ARG3, 15);
1446252206Sdavidcs      __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain
1447252206Sdavidcs
1448252206Sdavidcs      __ srdi(tmp1, R5_ARG3, 4);
1449252206Sdavidcs      __ andi_(R5_ARG3, R5_ARG3, 15);
1450252206Sdavidcs      __ mtctr(tmp1);
1451252206Sdavidcs
1452252206Sdavidcs      if (!VM_Version::has_vsx()) {
1453252206Sdavidcs
1454252206Sdavidcs        __ bind(l_8);
1455252206Sdavidcs        // Use unrolled version for mass copying (copy 16 elements at a time).
1456252206Sdavidcs        // Load feeding store gets zero latency on Power6, however not on Power5.
1457252206Sdavidcs        // Therefore, the following sequence is made for the good of both.
1458252206Sdavidcs        __ ld(tmp1, 0, R3_ARG1);
1459252206Sdavidcs        __ ld(tmp2, 8, R3_ARG1);
1460252206Sdavidcs        __ ld(tmp3, 16, R3_ARG1);
1461252206Sdavidcs        __ ld(tmp4, 24, R3_ARG1);
1462252206Sdavidcs        __ std(tmp1, 0, R4_ARG2);
1463252206Sdavidcs        __ std(tmp2, 8, R4_ARG2);
1464252206Sdavidcs        __ std(tmp3, 16, R4_ARG2);
1465252206Sdavidcs        __ std(tmp4, 24, R4_ARG2);
1466252206Sdavidcs        __ addi(R3_ARG1, R3_ARG1, 32);
1467252206Sdavidcs        __ addi(R4_ARG2, R4_ARG2, 32);
1468252206Sdavidcs        __ bdnz(l_8);
1469252206Sdavidcs
1470252206Sdavidcs      } else { // Processor supports VSX, so use it to mass copy.
1471252206Sdavidcs
1472252206Sdavidcs        // Prefetch src data into L2 cache.
1473252206Sdavidcs        __ dcbt(R3_ARG1, 0);
1474252206Sdavidcs
1475252206Sdavidcs        // If supported set DSCR pre-fetch to deepest.
1476252206Sdavidcs        if (VM_Version::has_mfdscr()) {
1477252206Sdavidcs          __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1478252206Sdavidcs          __ mtdscr(tmp2);
1479252206Sdavidcs        }
1480252206Sdavidcs        __ li(tmp1, 16);
1481252206Sdavidcs
1482252206Sdavidcs        // Align the backbranch target to 32 bytes rather than just 16: the
1483252206Sdavidcs        // loop contains fewer than 8 instructions, so it fits entirely in a
1484252206Sdavidcs        // single 32-byte i-cache sector.
1485252206Sdavidcs        __ align(32);
1486252206Sdavidcs
1487252206Sdavidcs        __ bind(l_9);
1488252206Sdavidcs        // Use loop with VSX load/store instructions to
1489252206Sdavidcs        // copy 16 elements at a time.
1490252206Sdavidcs        __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load from src.
1491252206Sdavidcs        __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst.
1492252206Sdavidcs        __ lxvd2x(tmp_vsr2, R3_ARG1, tmp1);  // Load from src + 16.
1493252206Sdavidcs        __ stxvd2x(tmp_vsr2, R4_ARG2, tmp1); // Store to dst + 16.
1494252206Sdavidcs        __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32.
1495252206Sdavidcs        __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32.
1496252206Sdavidcs        __ bdnz(l_9);                        // Dec CTR and loop if not zero.
1497252206Sdavidcs
1498252206Sdavidcs        // Restore DSCR pre-fetch value.
1499252206Sdavidcs        if (VM_Version::has_mfdscr()) {
1500252206Sdavidcs          __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1501252206Sdavidcs          __ mtdscr(tmp2);
1502252206Sdavidcs        }
1503252206Sdavidcs
1504252206Sdavidcs      }
1505252206Sdavidcs    } // FasterArrayCopy
1506252206Sdavidcs    __ bind(l_6);
1507252206Sdavidcs
1508252206Sdavidcs    // copy 2 elements at a time
1509252206Sdavidcs    { // FasterArrayCopy
1510252206Sdavidcs      __ cmpwi(CCR0, R5_ARG3, 2);
1511252206Sdavidcs      __ blt(CCR0, l_1);
1512252206Sdavidcs      __ srdi(tmp1, R5_ARG3, 1);
1513252206Sdavidcs      __ andi_(R5_ARG3, R5_ARG3, 1);
1514252206Sdavidcs
1515252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, -4);
1516252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, -4);
1517252206Sdavidcs      __ mtctr(tmp1);
1518252206Sdavidcs
1519252206Sdavidcs      __ bind(l_3);
1520252206Sdavidcs      __ lwzu(tmp2, 4, R3_ARG1);
1521252206Sdavidcs      __ stwu(tmp2, 4, R4_ARG2);
1522252206Sdavidcs      __ bdnz(l_3);
1523252206Sdavidcs
1524252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 4);
1525252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 4);
1526252206Sdavidcs    }
1527252206Sdavidcs
1528252206Sdavidcs    // do single element copy
1529252206Sdavidcs    __ bind(l_1);
1530252206Sdavidcs    __ cmpwi(CCR0, R5_ARG3, 0);
1531252206Sdavidcs    __ beq(CCR0, l_4);
1532252206Sdavidcs
1533252206Sdavidcs    { // FasterArrayCopy
1534252206Sdavidcs      __ mtctr(R5_ARG3);
1535252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, -2);
1536252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, -2);
1537252206Sdavidcs
1538252206Sdavidcs      __ bind(l_5);
1539252206Sdavidcs      __ lhzu(tmp2, 2, R3_ARG1);
1540252206Sdavidcs      __ sthu(tmp2, 2, R4_ARG2);
1541252206Sdavidcs      __ bdnz(l_5);
1542252206Sdavidcs    }
1543252206Sdavidcs    __ bind(l_4);
1544252206Sdavidcs    __ li(R3_RET, 0); // return 0
1545252206Sdavidcs    __ blr();
1546252206Sdavidcs
1547252206Sdavidcs    return start;
1548252206Sdavidcs  }
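
  // Illustrative sketch (names made up) of how the stub above splits the element count
  // for its 32-byte main loop: the CTR receives count / 16 iterations (16 shorts per
  // pass) while the low four bits remain as the tail for the 2-element and 1-element
  // copy steps described in the strategy comment.
  static void sketch_split_short_count(size_t count, size_t* main_iterations, size_t* tail_elements) {
    *main_iterations = count >> 4;  // srdi(tmp1, R5_ARG3, 4)
    *tail_elements   = count & 15;  // andi_(R5_ARG3, R5_ARG3, 15)
  }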
1549252206Sdavidcs
1550252206Sdavidcs  // Generate stub for conjoint short copy.  If "aligned" is true, the
1551252206Sdavidcs  // "from" and "to" addresses are assumed to be heapword aligned.
1552252206Sdavidcs  //
1553252206Sdavidcs  // Arguments for generated stub:
1554252206Sdavidcs  //      from:  R3_ARG1
1555252206Sdavidcs  //      to:    R4_ARG2
1556252206Sdavidcs  //      count: R5_ARG3 treated as signed
1557252206Sdavidcs  //
1558252206Sdavidcs  address generate_conjoint_short_copy(bool aligned, const char * name) {
1559252206Sdavidcs    StubCodeMark mark(this, "StubRoutines", name);
1560252206Sdavidcs    address start = __ function_entry();
1561252206Sdavidcs    assert_positive_int(R5_ARG3);
1562252206Sdavidcs
1563252206Sdavidcs    Register tmp1 = R6_ARG4;
1564252206Sdavidcs    Register tmp2 = R7_ARG5;
1565252206Sdavidcs    Register tmp3 = R8_ARG6;
1566252206Sdavidcs
1567252206Sdavidcs    address nooverlap_target = aligned ?
1568252206Sdavidcs      STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) :
1569252206Sdavidcs      STUB_ENTRY(jshort_disjoint_arraycopy);
1570252206Sdavidcs
1571252206Sdavidcs    array_overlap_test(nooverlap_target, 1);
1572252206Sdavidcs
1573252206Sdavidcs    Label l_1, l_2;
1574252206Sdavidcs    __ sldi(tmp1, R5_ARG3, 1);
1575252206Sdavidcs    __ b(l_2);
1576252206Sdavidcs    __ bind(l_1);
1577252206Sdavidcs    __ sthx(tmp2, R4_ARG2, tmp1);
1578252206Sdavidcs    __ bind(l_2);
1579252206Sdavidcs    __ addic_(tmp1, tmp1, -2);
1580252206Sdavidcs    __ lhzx(tmp2, R3_ARG1, tmp1);
1581252206Sdavidcs    __ bge(CCR0, l_1);
1582252206Sdavidcs
1583252206Sdavidcs    __ li(R3_RET, 0); // return 0
1584252206Sdavidcs    __ blr();
1585252206Sdavidcs
1586252206Sdavidcs    return start;
1587252206Sdavidcs  }
1588252206Sdavidcs
1589252206Sdavidcs  // Generate core code for disjoint int copy (and oop copy on 32-bit).  If "aligned"
1590252206Sdavidcs  // is true, the "from" and "to" addresses are assumed to be heapword aligned.
1591252206Sdavidcs  //
1592252206Sdavidcs  // Arguments:
1593252206Sdavidcs  //      from:  R3_ARG1
1594252206Sdavidcs  //      to:    R4_ARG2
1595252206Sdavidcs  //      count: R5_ARG3 treated as signed
1596252206Sdavidcs  //
1597252206Sdavidcs  void generate_disjoint_int_copy_core(bool aligned) {
1598252206Sdavidcs    Register tmp1 = R6_ARG4;
1599252206Sdavidcs    Register tmp2 = R7_ARG5;
1600252206Sdavidcs    Register tmp3 = R8_ARG6;
1601252206Sdavidcs    Register tmp4 = R0;
1602252206Sdavidcs
1603252206Sdavidcs    VectorSRegister tmp_vsr1  = VSR1;
1604252206Sdavidcs    VectorSRegister tmp_vsr2  = VSR2;
1605252206Sdavidcs
1606252206Sdavidcs    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
1607252206Sdavidcs
1608252206Sdavidcs    // for short arrays, just do single element copy
1609252206Sdavidcs    __ li(tmp3, 0);
1610252206Sdavidcs    __ cmpwi(CCR0, R5_ARG3, 5);
1611252206Sdavidcs    __ ble(CCR0, l_2);
1612252206Sdavidcs
1613252206Sdavidcs    if (!aligned) {
1614252206Sdavidcs        // check if arrays have same alignment mod 8.
1615252206Sdavidcs        __ xorr(tmp1, R3_ARG1, R4_ARG2);
1616252206Sdavidcs        __ andi_(R0, tmp1, 7);
1617252206Sdavidcs        // Not the same alignment, but ld and std just need to be 4 byte aligned.
1618252206Sdavidcs        __ bne(CCR0, l_4); // to OR from is 8 byte aligned -> copy 2 at a time
1619252206Sdavidcs
1620252206Sdavidcs        // copy 1 element to align to and from on an 8 byte boundary
1621252206Sdavidcs        __ andi_(R0, R3_ARG1, 7);
1622252206Sdavidcs        __ beq(CCR0, l_4);
1623252206Sdavidcs
1624252206Sdavidcs        __ lwzx(tmp2, R3_ARG1, tmp3);
1625252206Sdavidcs        __ addi(R5_ARG3, R5_ARG3, -1);
1626252206Sdavidcs        __ stwx(tmp2, R4_ARG2, tmp3);
1627252206Sdavidcs        { // FasterArrayCopy
1628252206Sdavidcs          __ addi(R3_ARG1, R3_ARG1, 4);
1629252206Sdavidcs          __ addi(R4_ARG2, R4_ARG2, 4);
1630252206Sdavidcs        }
1631252206Sdavidcs        __ bind(l_4);
1632252206Sdavidcs      }
1633252206Sdavidcs
1634252206Sdavidcs    { // FasterArrayCopy
1635252206Sdavidcs      __ cmpwi(CCR0, R5_ARG3, 7);
1636252206Sdavidcs      __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
1637252206Sdavidcs
1638252206Sdavidcs      __ srdi(tmp1, R5_ARG3, 3);
1639252206Sdavidcs      __ andi_(R5_ARG3, R5_ARG3, 7);
1640252206Sdavidcs      __ mtctr(tmp1);
1641252206Sdavidcs
1642252206Sdavidcs     if (!VM_Version::has_vsx()) {
1643252206Sdavidcs
1644252206Sdavidcs      __ bind(l_6);
1645252206Sdavidcs      // Use unrolled version for mass copying (copy 8 elements at a time).
1646252206Sdavidcs      // Load feeding store gets zero latency on Power6, however not on Power5.
1647252206Sdavidcs      // Therefore, the following sequence is made for the good of both.
1648252206Sdavidcs      __ ld(tmp1, 0, R3_ARG1);
1649252206Sdavidcs      __ ld(tmp2, 8, R3_ARG1);
1650252206Sdavidcs      __ ld(tmp3, 16, R3_ARG1);
1651252206Sdavidcs      __ ld(tmp4, 24, R3_ARG1);
1652252206Sdavidcs      __ std(tmp1, 0, R4_ARG2);
1653252206Sdavidcs      __ std(tmp2, 8, R4_ARG2);
1654252206Sdavidcs      __ std(tmp3, 16, R4_ARG2);
1655252206Sdavidcs      __ std(tmp4, 24, R4_ARG2);
1656252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 32);
1657252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 32);
1658252206Sdavidcs      __ bdnz(l_6);
1659252206Sdavidcs
1660252206Sdavidcs    } else { // Processor supports VSX, so use it to mass copy.
1661252206Sdavidcs
1662252206Sdavidcs      // Prefetch the data into the L2 cache.
1663252206Sdavidcs      __ dcbt(R3_ARG1, 0);
1664252206Sdavidcs
1665252206Sdavidcs      // If supported set DSCR pre-fetch to deepest.
1666252206Sdavidcs      if (VM_Version::has_mfdscr()) {
1667252206Sdavidcs        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1668252206Sdavidcs        __ mtdscr(tmp2);
1669252206Sdavidcs      }
1670252206Sdavidcs
1671252206Sdavidcs      __ li(tmp1, 16);
1672252206Sdavidcs
1673252206Sdavidcs      // Align the backbranch target to 32 bytes rather than just 16: the
1674252206Sdavidcs      // loop contains fewer than 8 instructions, so it fits entirely in a
1675252206Sdavidcs      // single 32-byte i-cache sector.
1676252206Sdavidcs      __ align(32);
1677252206Sdavidcs
1678252206Sdavidcs      __ bind(l_7);
1679252206Sdavidcs      // Use loop with VSX load/store instructions to
1680252206Sdavidcs      // copy 8 elements at a time.
1681252206Sdavidcs      __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1682252206Sdavidcs      __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1683252206Sdavidcs      __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1684252206Sdavidcs      __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1685252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1686252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
1687252206Sdavidcs      __ bdnz(l_7);                        // Dec CTR and loop if not zero.
1688252206Sdavidcs
1689252206Sdavidcs      // Restore DSCR pre-fetch value.
1690252206Sdavidcs      if (VM_Version::has_mfdscr()) {
1691252206Sdavidcs        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1692252206Sdavidcs        __ mtdscr(tmp2);
1693252206Sdavidcs      }
1694252206Sdavidcs
1695252206Sdavidcs    } // VSX
1696252206Sdavidcs   } // FasterArrayCopy
1697252206Sdavidcs
1698252206Sdavidcs    // copy 1 element at a time
1699252206Sdavidcs    __ bind(l_2);
1700252206Sdavidcs    __ cmpwi(CCR0, R5_ARG3, 0);
1701252206Sdavidcs    __ beq(CCR0, l_1);
1702252206Sdavidcs
1703252206Sdavidcs    { // FasterArrayCopy
1704252206Sdavidcs      __ mtctr(R5_ARG3);
1705252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, -4);
1706252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, -4);
1707252206Sdavidcs
1708252206Sdavidcs      __ bind(l_3);
1709252206Sdavidcs      __ lwzu(tmp2, 4, R3_ARG1);
1710252206Sdavidcs      __ stwu(tmp2, 4, R4_ARG2);
1711252206Sdavidcs      __ bdnz(l_3);
1712252206Sdavidcs    }
1713252206Sdavidcs
1714252206Sdavidcs    __ bind(l_1);
1715252206Sdavidcs    return;
1716252206Sdavidcs  }
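
  // Illustrative sketch of the co-alignment test used above (helper name made up):
  // XOR-ing the two addresses keeps only the bits in which they differ, so a zero
  // result for the low three bits means both pointers can reach an 8-byte boundary
  // after copying the same number of leading elements.
  static bool sketch_same_alignment_mod_8(const void* from, const void* to) {
    return ((((unsigned long)from) ^ ((unsigned long)to)) & 7) == 0;  // xorr + andi_ with mask 7
  }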
1717252206Sdavidcs
1718252206Sdavidcs  // Generate stub for disjoint int copy.  If "aligned" is true, the
1719252206Sdavidcs  // "from" and "to" addresses are assumed to be heapword aligned.
1720252206Sdavidcs  //
1721252206Sdavidcs  // Arguments for generated stub:
1722252206Sdavidcs  //      from:  R3_ARG1
1723252206Sdavidcs  //      to:    R4_ARG2
1724252206Sdavidcs  //      count: R5_ARG3 treated as signed
1725252206Sdavidcs  //
1726252206Sdavidcs  address generate_disjoint_int_copy(bool aligned, const char * name) {
1727252206Sdavidcs    StubCodeMark mark(this, "StubRoutines", name);
1728252206Sdavidcs    address start = __ function_entry();
1729252206Sdavidcs    assert_positive_int(R5_ARG3);
1730252206Sdavidcs    generate_disjoint_int_copy_core(aligned);
1731252206Sdavidcs    __ li(R3_RET, 0); // return 0
1732252206Sdavidcs    __ blr();
1733252206Sdavidcs    return start;
1734252206Sdavidcs  }
1735252206Sdavidcs
1736252206Sdavidcs  // Generate core code for conjoint int copy (and oop copy on
1737252206Sdavidcs  // 32-bit).  If "aligned" is true, the "from" and "to" addresses
1738252206Sdavidcs  // are assumed to be heapword aligned.
1739252206Sdavidcs  //
1740252206Sdavidcs  // Arguments:
1741252206Sdavidcs  //      from:  R3_ARG1
1742252206Sdavidcs  //      to:    R4_ARG2
1743252206Sdavidcs  //      count: R5_ARG3 treated as signed
1744252206Sdavidcs  //
1745252206Sdavidcs  void generate_conjoint_int_copy_core(bool aligned) {
1746252206Sdavidcs    // Do reverse copy.  We assume the case of actual overlap is rare enough
1747252206Sdavidcs    // that we don't have to optimize it.
1748252206Sdavidcs
1749252206Sdavidcs    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7;
1750252206Sdavidcs
1751252206Sdavidcs    Register tmp1 = R6_ARG4;
1752252206Sdavidcs    Register tmp2 = R7_ARG5;
1753252206Sdavidcs    Register tmp3 = R8_ARG6;
1754252206Sdavidcs    Register tmp4 = R0;
1755252206Sdavidcs
1756252206Sdavidcs    VectorSRegister tmp_vsr1  = VSR1;
1757252206Sdavidcs    VectorSRegister tmp_vsr2  = VSR2;
1758252206Sdavidcs
1759252206Sdavidcs    { // FasterArrayCopy
1760252206Sdavidcs      __ cmpwi(CCR0, R5_ARG3, 0);
1761252206Sdavidcs      __ beq(CCR0, l_6);
1762252206Sdavidcs
1763252206Sdavidcs      __ sldi(R5_ARG3, R5_ARG3, 2);
1764252206Sdavidcs      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1765252206Sdavidcs      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1766252206Sdavidcs      __ srdi(R5_ARG3, R5_ARG3, 2);
1767252206Sdavidcs
1768252206Sdavidcs      if (!aligned) {
1769252206Sdavidcs        // check if arrays have same alignment mod 8.
1770252206Sdavidcs        __ xorr(tmp1, R3_ARG1, R4_ARG2);
1771252206Sdavidcs        __ andi_(R0, tmp1, 7);
1772252206Sdavidcs        // Not the same alignment, but ld and std just need to be 4 byte aligned.
1773252206Sdavidcs        __ bne(CCR0, l_7); // to OR from is 8 byte aligned -> copy 2 at a time
1774252206Sdavidcs
1775252206Sdavidcs        // copy 1 element to align to and from on an 8 byte boundary
1776252206Sdavidcs        __ andi_(R0, R3_ARG1, 7);
1777252206Sdavidcs        __ beq(CCR0, l_7);
1778252206Sdavidcs
1779252206Sdavidcs        __ addi(R3_ARG1, R3_ARG1, -4);
1780252206Sdavidcs        __ addi(R4_ARG2, R4_ARG2, -4);
1781252206Sdavidcs        __ addi(R5_ARG3, R5_ARG3, -1);
1782252206Sdavidcs        __ lwzx(tmp2, R3_ARG1);
1783252206Sdavidcs        __ stwx(tmp2, R4_ARG2);
1784252206Sdavidcs        __ bind(l_7);
1785252206Sdavidcs      }
1786252206Sdavidcs
1787252206Sdavidcs      __ cmpwi(CCR0, R5_ARG3, 7);
1788252206Sdavidcs      __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
1789252206Sdavidcs
1790252206Sdavidcs      __ srdi(tmp1, R5_ARG3, 3);
1791252206Sdavidcs      __ andi(R5_ARG3, R5_ARG3, 7);
1792252206Sdavidcs      __ mtctr(tmp1);
1793252206Sdavidcs
1794252206Sdavidcs     if (!VM_Version::has_vsx()) {
1795252206Sdavidcs      __ bind(l_4);
1796252206Sdavidcs      // Use unrolled version for mass copying (copy 4 elements at a time).
1797252206Sdavidcs      // Load feeding store gets zero latency on Power6, however not on Power5.
1798252206Sdavidcs      // Therefore, the following sequence is made for the good of both.
1799252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, -32);
1800252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, -32);
1801252206Sdavidcs      __ ld(tmp4, 24, R3_ARG1);
1802252206Sdavidcs      __ ld(tmp3, 16, R3_ARG1);
1803252206Sdavidcs      __ ld(tmp2, 8, R3_ARG1);
1804252206Sdavidcs      __ ld(tmp1, 0, R3_ARG1);
1805252206Sdavidcs      __ std(tmp4, 24, R4_ARG2);
1806252206Sdavidcs      __ std(tmp3, 16, R4_ARG2);
1807252206Sdavidcs      __ std(tmp2, 8, R4_ARG2);
1808252206Sdavidcs      __ std(tmp1, 0, R4_ARG2);
1809252206Sdavidcs      __ bdnz(l_4);
1810252206Sdavidcs     } else {  // Processor supports VSX, so use it to mass copy.
1811252206Sdavidcs      // Prefetch the data into the L2 cache.
1812252206Sdavidcs      __ dcbt(R3_ARG1, 0);
1813252206Sdavidcs
1814252206Sdavidcs      // If supported set DSCR pre-fetch to deepest.
1815252206Sdavidcs      if (VM_Version::has_mfdscr()) {
1816252206Sdavidcs        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1817252206Sdavidcs        __ mtdscr(tmp2);
1818252206Sdavidcs      }
1819252206Sdavidcs
1820252206Sdavidcs      __ li(tmp1, 16);
1821252206Sdavidcs
1822252206Sdavidcs      // Align the backbranch target to 32 bytes rather than just 16: the
1823252206Sdavidcs      // loop contains fewer than 8 instructions, so it fits entirely in a
1824252206Sdavidcs      // single 32-byte i-cache sector.
1825252206Sdavidcs      __ align(32);
1826252206Sdavidcs
1827252206Sdavidcs      __ bind(l_4);
1828252206Sdavidcs      // Use loop with VSX load/store instructions to
1829252206Sdavidcs      // copy 8 elements at a time.
1830252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, -32);      // Update src-=32
1831252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, -32);      // Update dst-=32
1832252206Sdavidcs      __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src+16
1833252206Sdavidcs      __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1834252206Sdavidcs      __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
1835252206Sdavidcs      __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1836252206Sdavidcs      __ bdnz(l_4);
1837252206Sdavidcs
1838252206Sdavidcs      // Restore DSCR pre-fetch value.
1839252206Sdavidcs      if (VM_Version::has_mfdscr()) {
1840252206Sdavidcs        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1841252206Sdavidcs        __ mtdscr(tmp2);
1842252206Sdavidcs      }
1843252206Sdavidcs     }
1844252206Sdavidcs
1845252206Sdavidcs      __ cmpwi(CCR0, R5_ARG3, 0);
1846252206Sdavidcs      __ beq(CCR0, l_6);
1847252206Sdavidcs
1848252206Sdavidcs      __ bind(l_5);
1849252206Sdavidcs      __ mtctr(R5_ARG3);
1850252206Sdavidcs      __ bind(l_3);
1851252206Sdavidcs      __ lwz(R0, -4, R3_ARG1);
1852252206Sdavidcs      __ stw(R0, -4, R4_ARG2);
1853252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, -4);
1854252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, -4);
1855252206Sdavidcs      __ bdnz(l_3);
1856252206Sdavidcs
1857252206Sdavidcs      __ bind(l_6);
1858252206Sdavidcs    }
1859252206Sdavidcs  }
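
  // Simplified C++ sketch of the reverse int copy above (no VSX, names made up): both
  // pointers are first advanced past the end of the arrays, then the copy walks
  // backwards in 8-element blocks with a single-element tail.
  static void sketch_conjoint_int_copy(const unsigned int* from, unsigned int* to, size_t count) {
    from += count;                       // add(R3_ARG1, R3_ARG1, count * 4)
    to   += count;                       // add(R4_ARG2, R4_ARG2, count * 4)
    size_t blocks = count >> 3;          // 8 ints (32 bytes) per pass
    size_t tail   = count & 7;
    while (blocks-- > 0) {
      from -= 8; to -= 8;
      for (int i = 7; i >= 0; i--) to[i] = from[i];
    }
    while (tail-- > 0) *--to = *--from;  // remaining lowest elements, still backwards
  }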
1860252206Sdavidcs
1861252206Sdavidcs  // Generate stub for conjoint int copy.  If "aligned" is true, the
1862252206Sdavidcs  // "from" and "to" addresses are assumed to be heapword aligned.
1863252206Sdavidcs  //
1864252206Sdavidcs  // Arguments for generated stub:
1865252206Sdavidcs  //      from:  R3_ARG1
1866252206Sdavidcs  //      to:    R4_ARG2
1867252206Sdavidcs  //      count: R5_ARG3 treated as signed
1868252206Sdavidcs  //
1869252206Sdavidcs  address generate_conjoint_int_copy(bool aligned, const char * name) {
1870252206Sdavidcs    StubCodeMark mark(this, "StubRoutines", name);
1871252206Sdavidcs    address start = __ function_entry();
1872252206Sdavidcs    assert_positive_int(R5_ARG3);
1873252206Sdavidcs    address nooverlap_target = aligned ?
1874252206Sdavidcs      STUB_ENTRY(arrayof_jint_disjoint_arraycopy) :
1875252206Sdavidcs      STUB_ENTRY(jint_disjoint_arraycopy);
1876252206Sdavidcs
1877252206Sdavidcs    array_overlap_test(nooverlap_target, 2);
1878252206Sdavidcs
1879252206Sdavidcs    generate_conjoint_int_copy_core(aligned);
1880252206Sdavidcs
1881252206Sdavidcs    __ li(R3_RET, 0); // return 0
1882252206Sdavidcs    __ blr();
1883252206Sdavidcs
1884252206Sdavidcs    return start;
1885252206Sdavidcs  }
1886252206Sdavidcs
1887252206Sdavidcs  // Generate core code for disjoint long copy (and oop copy on
1888252206Sdavidcs  // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1889252206Sdavidcs  // are assumed to be heapword aligned.
1890252206Sdavidcs  //
1891252206Sdavidcs  // Arguments:
1892252206Sdavidcs  //      from:  R3_ARG1
1893252206Sdavidcs  //      to:    R4_ARG2
1894252206Sdavidcs  //      count: R5_ARG3 treated as signed
1895252206Sdavidcs  //
1896252206Sdavidcs  void generate_disjoint_long_copy_core(bool aligned) {
1897252206Sdavidcs    Register tmp1 = R6_ARG4;
1898252206Sdavidcs    Register tmp2 = R7_ARG5;
1899252206Sdavidcs    Register tmp3 = R8_ARG6;
1900252206Sdavidcs    Register tmp4 = R0;
1901252206Sdavidcs
1902252206Sdavidcs    Label l_1, l_2, l_3, l_4, l_5;
1903252206Sdavidcs
1904252206Sdavidcs    VectorSRegister tmp_vsr1  = VSR1;
1905252206Sdavidcs    VectorSRegister tmp_vsr2  = VSR2;
1906252206Sdavidcs
1907252206Sdavidcs    { // FasterArrayCopy
1908252206Sdavidcs      __ cmpwi(CCR0, R5_ARG3, 3);
1909252206Sdavidcs      __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
1910252206Sdavidcs
1911252206Sdavidcs      __ srdi(tmp1, R5_ARG3, 2);
1912252206Sdavidcs      __ andi_(R5_ARG3, R5_ARG3, 3);
1913252206Sdavidcs      __ mtctr(tmp1);
1914252206Sdavidcs
1915252206Sdavidcs    if (!VM_Version::has_vsx()) {
1916252206Sdavidcs      __ bind(l_4);
1917252206Sdavidcs      // Use unrolled version for mass copying (copy 4 elements at a time).
1918252206Sdavidcs      // Load feeding store gets zero latency on Power6, however not on Power5.
1919252206Sdavidcs      // Therefore, the following sequence is made for the good of both.
1920252206Sdavidcs      __ ld(tmp1, 0, R3_ARG1);
1921252206Sdavidcs      __ ld(tmp2, 8, R3_ARG1);
1922252206Sdavidcs      __ ld(tmp3, 16, R3_ARG1);
1923252206Sdavidcs      __ ld(tmp4, 24, R3_ARG1);
1924252206Sdavidcs      __ std(tmp1, 0, R4_ARG2);
1925252206Sdavidcs      __ std(tmp2, 8, R4_ARG2);
1926252206Sdavidcs      __ std(tmp3, 16, R4_ARG2);
1927252206Sdavidcs      __ std(tmp4, 24, R4_ARG2);
1928252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 32);
1929252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 32);
1930252206Sdavidcs      __ bdnz(l_4);
1931252206Sdavidcs
1932252206Sdavidcs    } else { // Processor supports VSX, so use it to mass copy.
1933252206Sdavidcs
1934252206Sdavidcs      // Prefetch the data into the L2 cache.
1935252206Sdavidcs      __ dcbt(R3_ARG1, 0);
1936252206Sdavidcs
1937252206Sdavidcs      // If supported set DSCR pre-fetch to deepest.
1938252206Sdavidcs      if (VM_Version::has_mfdscr()) {
1939252206Sdavidcs        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
1940252206Sdavidcs        __ mtdscr(tmp2);
1941252206Sdavidcs      }
1942252206Sdavidcs
1943252206Sdavidcs      __ li(tmp1, 16);
1944252206Sdavidcs
1945252206Sdavidcs      // Align the backbranch target to 32 bytes rather than just 16: the
1946252206Sdavidcs      // loop contains fewer than 8 instructions, so it fits entirely in a
1947252206Sdavidcs      // single 32-byte i-cache sector.
1948252206Sdavidcs      __ align(32);
1949252206Sdavidcs
1950252206Sdavidcs      __ bind(l_5);
1951252206Sdavidcs      // Use loop with VSX load/store instructions to
1952252206Sdavidcs      // copy 4 elements at a time.
1953252206Sdavidcs      __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
1954252206Sdavidcs      __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
1955252206Sdavidcs      __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src + 16
1956252206Sdavidcs      __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst + 16
1957252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, 32);       // Update src+=32
1958252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, 32);       // Update dst+=32
1959252206Sdavidcs      __ bdnz(l_5);                        // Dec CTR and loop if not zero.
1960252206Sdavidcs
1961252206Sdavidcs      // Restore DSCR pre-fetch value.
1962252206Sdavidcs      if (VM_Version::has_mfdscr()) {
1963252206Sdavidcs        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
1964252206Sdavidcs        __ mtdscr(tmp2);
1965252206Sdavidcs      }
1966252206Sdavidcs
1967252206Sdavidcs    } // VSX
1968252206Sdavidcs   } // FasterArrayCopy
1969252206Sdavidcs
1970252206Sdavidcs    // copy 1 element at a time
1971252206Sdavidcs    __ bind(l_3);
1972252206Sdavidcs    __ cmpwi(CCR0, R5_ARG3, 0);
1973252206Sdavidcs    __ beq(CCR0, l_1);
1974252206Sdavidcs
1975252206Sdavidcs    { // FasterArrayCopy
1976252206Sdavidcs      __ mtctr(R5_ARG3);
1977252206Sdavidcs      __ addi(R3_ARG1, R3_ARG1, -8);
1978252206Sdavidcs      __ addi(R4_ARG2, R4_ARG2, -8);
1979252206Sdavidcs
1980252206Sdavidcs      __ bind(l_2);
1981252206Sdavidcs      __ ldu(R0, 8, R3_ARG1);
1982252206Sdavidcs      __ stdu(R0, 8, R4_ARG2);
1983252206Sdavidcs      __ bdnz(l_2);
1984252206Sdavidcs
1985252206Sdavidcs    }
1986252206Sdavidcs    __ bind(l_1);
1987252206Sdavidcs  }
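
  // Illustrative scalar stand-in for the VSX main loop above (names made up, 'count'
  // assumed to be a multiple of 4): each lxvd2x/stxvd2x pair moves 16 bytes, and two
  // pairs per iteration move 32 bytes, i.e. 4 longs.
  static void sketch_copy_four_longs_per_pass(const unsigned long* from, unsigned long* to, size_t count) {
    for (size_t i = 0; i < count; i += 4) {
      to[i]     = from[i];      // first 16-byte half of the iteration
      to[i + 1] = from[i + 1];
      to[i + 2] = from[i + 2];  // second 16-byte half
      to[i + 3] = from[i + 3];
    }
  }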
1988252206Sdavidcs
1989252206Sdavidcs  // Generate stub for disjoint long copy.  If "aligned" is true, the
1990252206Sdavidcs  // "from" and "to" addresses are assumed to be heapword aligned.
1991252206Sdavidcs  //
1992252206Sdavidcs  // Arguments for generated stub:
1993  //      from:  R3_ARG1
1994  //      to:    R4_ARG2
1995  //      count: R5_ARG3 treated as signed
1996  //
1997  address generate_disjoint_long_copy(bool aligned, const char * name) {
1998    StubCodeMark mark(this, "StubRoutines", name);
1999    address start = __ function_entry();
2000    assert_positive_int(R5_ARG3);
2001    generate_disjoint_long_copy_core(aligned);
2002    __ li(R3_RET, 0); // return 0
2003    __ blr();
2004
2005    return start;
2006  }
2007
2008  // Generate core code for conjoint long copy (and oop copy on
2009  // 64-bit).  If "aligned" is true, the "from" and "to" addresses
2010  // are assumed to be heapword aligned.
2011  //
2012  // Arguments:
2013  //      from:  R3_ARG1
2014  //      to:    R4_ARG2
2015  //      count: R5_ARG3 treated as signed
2016  //
2017  void generate_conjoint_long_copy_core(bool aligned) {
2018    Register tmp1 = R6_ARG4;
2019    Register tmp2 = R7_ARG5;
2020    Register tmp3 = R8_ARG6;
2021    Register tmp4 = R0;
2022
2023    VectorSRegister tmp_vsr1  = VSR1;
2024    VectorSRegister tmp_vsr2  = VSR2;
2025
2026    Label l_1, l_2, l_3, l_4, l_5;
2027
2028    __ cmpwi(CCR0, R5_ARG3, 0);
2029    __ beq(CCR0, l_1);
2030
2031    { // FasterArrayCopy
2032      __ sldi(R5_ARG3, R5_ARG3, 3);
2033      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
2034      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
2035      __ srdi(R5_ARG3, R5_ARG3, 3);
2036
2037      __ cmpwi(CCR0, R5_ARG3, 3);
2038      __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
2039
2040      __ srdi(tmp1, R5_ARG3, 2);
2041      __ andi(R5_ARG3, R5_ARG3, 3);
2042      __ mtctr(tmp1);
2043
2044     if (!VM_Version::has_vsx()) {
2045      __ bind(l_4);
2046      // Use unrolled version for mass copying (copy 4 elements at a time).
2047      // Load feeding store gets zero latency on Power6, however not on Power5.
2048      // Therefore, the following sequence is made for the good of both.
2049      __ addi(R3_ARG1, R3_ARG1, -32);
2050      __ addi(R4_ARG2, R4_ARG2, -32);
2051      __ ld(tmp4, 24, R3_ARG1);
2052      __ ld(tmp3, 16, R3_ARG1);
2053      __ ld(tmp2, 8, R3_ARG1);
2054      __ ld(tmp1, 0, R3_ARG1);
2055      __ std(tmp4, 24, R4_ARG2);
2056      __ std(tmp3, 16, R4_ARG2);
2057      __ std(tmp2, 8, R4_ARG2);
2058      __ std(tmp1, 0, R4_ARG2);
2059      __ bdnz(l_4);
2060     } else { // Processor supports VSX, so use it to mass copy.
2061      // Prefetch the data into the L2 cache.
2062      __ dcbt(R3_ARG1, 0);
2063
2064      // If supported set DSCR pre-fetch to deepest.
2065      if (VM_Version::has_mfdscr()) {
2066        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
2067        __ mtdscr(tmp2);
2068      }
2069
2070      __ li(tmp1, 16);
2071
2072      // Align the backbranch target to 32 bytes rather than just 16: the
2073      // loop contains fewer than 8 instructions, so it fits entirely in a
2074      // single 32-byte i-cache sector.
2075      __ align(32);
2076
2077      __ bind(l_4);
2078      // Use loop with VSX load/store instructions to
2079      // copy 4 elements at a time.
2080      __ addi(R3_ARG1, R3_ARG1, -32);      // Update src-=32
2081      __ addi(R4_ARG2, R4_ARG2, -32);      // Update dst-=32
2082      __ lxvd2x(tmp_vsr2, tmp1, R3_ARG1);  // Load src+16
2083      __ lxvd2x(tmp_vsr1, R3_ARG1);        // Load src
2084      __ stxvd2x(tmp_vsr2, tmp1, R4_ARG2); // Store to dst+16
2085      __ stxvd2x(tmp_vsr1, R4_ARG2);       // Store to dst
2086      __ bdnz(l_4);
2087
2088      // Restore DSCR pre-fetch value.
2089      if (VM_Version::has_mfdscr()) {
2090        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
2091        __ mtdscr(tmp2);
2092      }
2093     }
2094
2095      __ cmpwi(CCR0, R5_ARG3, 0);
2096      __ beq(CCR0, l_1);
2097
2098      __ bind(l_5);
2099      __ mtctr(R5_ARG3);
2100      __ bind(l_3);
2101      __ ld(R0, -8, R3_ARG1);
2102      __ std(R0, -8, R4_ARG2);
2103      __ addi(R3_ARG1, R3_ARG1, -8);
2104      __ addi(R4_ARG2, R4_ARG2, -8);
2105      __ bdnz(l_3);
2106
2107    }
2108    __ bind(l_1);
2109  }
2110
2111  // Generate stub for conjoint long copy.  If "aligned" is true, the
2112  // "from" and "to" addresses are assumed to be heapword aligned.
2113  //
2114  // Arguments for generated stub:
2115  //      from:  R3_ARG1
2116  //      to:    R4_ARG2
2117  //      count: R5_ARG3 treated as signed
2118  //
2119  address generate_conjoint_long_copy(bool aligned, const char * name) {
2120    StubCodeMark mark(this, "StubRoutines", name);
2121    address start = __ function_entry();
2122    assert_positive_int(R5_ARG3);
2123    address nooverlap_target = aligned ?
2124      STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) :
2125      STUB_ENTRY(jlong_disjoint_arraycopy);
2126
2127    array_overlap_test(nooverlap_target, 3);
2128    generate_conjoint_long_copy_core(aligned);
2129
2130    __ li(R3_RET, 0); // return 0
2131    __ blr();
2132
2133    return start;
2134  }
2135
2136  // Generate stub for conjoint oop copy.  If "aligned" is true, the
2137  // "from" and "to" addresses are assumed to be heapword aligned.
2138  //
2139  // Arguments for generated stub:
2140  //      from:  R3_ARG1
2141  //      to:    R4_ARG2
2142  //      count: R5_ARG3 treated as signed
2143  //      dest_uninitialized: G1 support
2144  //
2145  address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
2146    StubCodeMark mark(this, "StubRoutines", name);
2147
2148    address start = __ function_entry();
2149    assert_positive_int(R5_ARG3);
2150    address nooverlap_target = aligned ?
2151      STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
2152      STUB_ENTRY(oop_disjoint_arraycopy);
2153
2154    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
2155
2156    // Save arguments.
2157    __ mr(R9_ARG7, R4_ARG2);
2158    __ mr(R10_ARG8, R5_ARG3);
2159
2160    if (UseCompressedOops) {
2161      array_overlap_test(nooverlap_target, 2);
2162      generate_conjoint_int_copy_core(aligned);
2163    } else {
2164      array_overlap_test(nooverlap_target, 3);
2165      generate_conjoint_long_copy_core(aligned);
2166    }
2167
2168    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
2169    __ li(R3_RET, 0); // return 0
2170    __ blr();
2171    return start;
2172  }
2173
2174  // Generate stub for disjoint oop copy.  If "aligned" is true, the
2175  // "from" and "to" addresses are assumed to be heapword aligned.
2176  //
2177  // Arguments for generated stub:
2178  //      from:  R3_ARG1
2179  //      to:    R4_ARG2
2180  //      count: R5_ARG3 treated as signed
2181  //      dest_uninitialized: G1 support
2182  //
2183  address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
2184    StubCodeMark mark(this, "StubRoutines", name);
2185    address start = __ function_entry();
2186    assert_positive_int(R5_ARG3);
2187    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
2188
2189    // save some arguments, disjoint_long_copy_core destroys them.
2190    // needed for post barrier
2191    __ mr(R9_ARG7, R4_ARG2);
2192    __ mr(R10_ARG8, R5_ARG3);
2193
2194    if (UseCompressedOops) {
2195      generate_disjoint_int_copy_core(aligned);
2196    } else {
2197      generate_disjoint_long_copy_core(aligned);
2198    }
2199
2200    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
2201    __ li(R3_RET, 0); // return 0
2202    __ blr();
2203
2204    return start;
2205  }
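
  // Illustrative sketch of the dispatch used by both oop copy stubs above (helper name
  // made up): with compressed oops every element is a 32-bit narrow oop and the int
  // copy core is reused, otherwise elements are full 64-bit pointers and the long
  // copy core is used.
  static size_t sketch_oop_element_size(bool use_compressed_oops) {
    return use_compressed_oops ? sizeof(unsigned int)   // narrow oop -> int copy core
                               : sizeof(unsigned long); // full-width oop -> long copy core
  }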
2206
2207
2208  // Helper for generating a dynamic type check.
2209  // Smashes only the given temp registers.
2210  void generate_type_check(Register sub_klass,
2211                           Register super_check_offset,
2212                           Register super_klass,
2213                           Register temp,
2214                           Label& L_success) {
2215    assert_different_registers(sub_klass, super_check_offset, super_klass);
2216
2217    BLOCK_COMMENT("type_check:");
2218
2219    Label L_miss;
2220
2221    __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL,
2222                                     super_check_offset);
2223    __ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success, NULL);
2224
2225    // Fall through on failure!
2226    __ bind(L_miss);
2227  }
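
  // Heavily simplified, stand-alone sketch of the subtype check wrapped above. The
  // structure below is made up for illustration and is not the real Klass layout:
  // the fast path compares a single cached supertype, and only when that is
  // inconclusive does the slow path scan a list of secondary supertypes.
  struct SketchKlass {
    SketchKlass*  cached_super;      // stands in for the displayed/cached supertype
    SketchKlass** secondary_supers;  // NULL-terminated list (made up for the sketch)
  };
  static bool sketch_is_subtype_of(const SketchKlass* sub, const SketchKlass* super) {
    if (sub == super || sub->cached_super == super) return true;  // fast path
    for (SketchKlass** s = sub->secondary_supers; s != NULL && *s != NULL; s++) {
      if (*s == super) return true;                               // slow path scan
    }
    return false;
  }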
2228
2229
2230  //  Generate stub for checked oop copy.
2231  //
2232  // Arguments for generated stub:
2233  //      from:  R3
2234  //      to:    R4
2235  //      count: R5 treated as signed
2236  //      ckoff: R6 (super_check_offset)
2237  //      ckval: R7 (super_klass)
2238  //      ret:   R3 zero for success; (-1^K) where K is partial transfer count
2239  //
2240  address generate_checkcast_copy(const char *name, bool dest_uninitialized) {
2241
2242    const Register R3_from   = R3_ARG1;      // source array address
2243    const Register R4_to     = R4_ARG2;      // destination array address
2244    const Register R5_count  = R5_ARG3;      // elements count
2245    const Register R6_ckoff  = R6_ARG4;      // super_check_offset
2246    const Register R7_ckval  = R7_ARG5;      // super_klass
2247
2248    const Register R8_offset = R8_ARG6;      // loop var, with stride wordSize
2249    const Register R9_remain = R9_ARG7;      // loop var, with stride -1
2250    const Register R10_oop   = R10_ARG8;     // actual oop copied
2251    const Register R11_klass = R11_scratch1; // oop._klass
2252    const Register R12_tmp   = R12_scratch2;
2253
2254    const Register R2_minus1 = R2;
2255
2256    //__ align(CodeEntryAlignment);
2257    StubCodeMark mark(this, "StubRoutines", name);
2258    address start = __ function_entry();
2259
2260    // Assert that int is 64 bit sign extended and arrays are not conjoint.
2261#ifdef ASSERT
2262    {
2263    assert_positive_int(R5_ARG3);
2264    const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2;
2265    Label no_overlap;
2266    __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
2267    __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes
2268    __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
2269    __ cmpld(CCR1, tmp1, tmp2);
2270    __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
2271    // Overlaps if Src before dst and distance smaller than size.
2272    // Branch to forward copy routine otherwise.
2273    __ blt(CCR0, no_overlap);
2274    __ stop("overlap in checkcast_copy", 0x9543);
2275    __ bind(no_overlap);
2276    }
2277#endif
2278
2279    gen_write_ref_array_pre_barrier(R3_from, R4_to, R5_count, dest_uninitialized, R12_tmp, /* preserve: */ R6_ckoff, R7_ckval);
2280
2281    //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
2282
2283    Label load_element, store_element, store_null, success, do_card_marks;
2284    __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
2285    __ li(R8_offset, 0);                   // Offset from start of arrays.
2286    __ li(R2_minus1, -1);
2287    __ bne(CCR0, load_element);
2288
2289    // Empty array: Nothing to do.
2290    __ li(R3_RET, 0);           // Return 0 on (trivial) success.
2291    __ blr();
2292
2293    // ======== begin loop ========
2294    // (Entry is load_element.)
2295    __ align(OptoLoopAlignment);
2296    __ bind(store_element);
2297    if (UseCompressedOops) {
2298      __ encode_heap_oop_not_null(R10_oop);
2299      __ bind(store_null);
2300      __ stw(R10_oop, R8_offset, R4_to);
2301    } else {
2302      __ bind(store_null);
2303      __ std(R10_oop, R8_offset, R4_to);
2304    }
2305
2306    __ addi(R8_offset, R8_offset, heapOopSize);   // Step to next offset.
2307    __ add_(R9_remain, R2_minus1, R9_remain);     // Decrement the count.
2308    __ beq(CCR0, success);
2309
2310    // ======== loop entry is here ========
2311    __ bind(load_element);
2312    __ load_heap_oop(R10_oop, R8_offset, R3_from, &store_null);  // Load the oop.
2313
2314    __ load_klass(R11_klass, R10_oop); // Query the object klass.
2315
2316    generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp,
2317                        // Branch to this on success:
2318                        store_element);
2319    // ======== end loop ========
2320
2321    // It was a real error; we must depend on the caller to finish the job.
2322    // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops.
2323    // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain),
2324    // and report their number to the caller.
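    // Note on the encoding: 'nand x,x,x' computes ~K, and ~K == (-1 ^ K),
    // so the caller can recover K, the number of elements copied, as ~R3_RET.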
2325    __ subf_(R5_count, R9_remain, R5_count);
2326    __ nand(R3_RET, R5_count, R5_count);   // report (-1^K) to caller
2327    __ bne(CCR0, do_card_marks);
2328    __ blr();
2329
2330    __ bind(success);
2331    __ li(R3_RET, 0);
2332
2333    __ bind(do_card_marks);
2334    // Store check on R4_to[0..R5_count-1].
2335    gen_write_ref_array_post_barrier(R4_to, R5_count, R12_tmp, /* preserve: */ R3_RET);
2336    __ blr();
2337    return start;
2338  }
2339
2340
2341  //  Generate 'unsafe' array copy stub.
2342  //  Though just as safe as the other stubs, it takes an unscaled
2343  //  size_t argument instead of an element count.
2344  //
2345  // Arguments for generated stub:
2346  //      from:  R3
2347  //      to:    R4
2348  //      count: R5 byte count, treated as ssize_t, can be zero
2349  //
2350  // Examines the alignment of the operands and dispatches
2351  // to a long, int, short, or byte copy loop.
2352  //
2353  address generate_unsafe_copy(const char* name,
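  // Dispatch sketch (pseudo-code, illustration only):
  //
  //   bits = from | to | count;
  //   if      ((bits & 7) == 0) jlong_copy (from, to, count >> 3);
  //   else if ((bits & 3) == 0) jint_copy  (from, to, count >> 2);
  //   else if ((bits & 1) == 0) jshort_copy(from, to, count >> 1);
  //   else                      jbyte_copy (from, to, count);
  //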
2354                               address byte_copy_entry,
2355                               address short_copy_entry,
2356                               address int_copy_entry,
2357                               address long_copy_entry) {
2358
2359    const Register R3_from   = R3_ARG1;      // source array address
2360    const Register R4_to     = R4_ARG2;      // destination array address
2361    const Register R5_count  = R5_ARG3;      // byte count on entry (as long on PPC64), scaled to elements below
2362
2363    const Register R6_bits   = R6_ARG4;      // test copy of low bits
2364    const Register R7_tmp    = R7_ARG5;
2365
2366    //__ align(CodeEntryAlignment);
2367    StubCodeMark mark(this, "StubRoutines", name);
2368    address start = __ function_entry();
2369
2370    // Bump this on entry, not on exit:
2371    //inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R6_bits, R7_tmp);
2372
2373    Label short_copy, int_copy, long_copy;
2374
2375    __ orr(R6_bits, R3_from, R4_to);
2376    __ orr(R6_bits, R6_bits, R5_count);
2377    __ andi_(R0, R6_bits, (BytesPerLong-1));
2378    __ beq(CCR0, long_copy);
2379
2380    __ andi_(R0, R6_bits, (BytesPerInt-1));
2381    __ beq(CCR0, int_copy);
2382
2383    __ andi_(R0, R6_bits, (BytesPerShort-1));
2384    __ beq(CCR0, short_copy);
2385
2386    // byte_copy:
2387    __ b(byte_copy_entry);
2388
2389    __ bind(short_copy);
2390    __ srwi(R5_count, R5_count, LogBytesPerShort);
2391    __ b(short_copy_entry);
2392
2393    __ bind(int_copy);
2394    __ srwi(R5_count, R5_count, LogBytesPerInt);
2395    __ b(int_copy_entry);
2396
2397    __ bind(long_copy);
2398    __ srwi(R5_count, R5_count, LogBytesPerLong);
2399    __ b(long_copy_entry);
2400
2401    return start;
2402  }
2403
2404
2405  // Perform range checks on the proposed arraycopy.
2406  // Kills the two temps, but nothing else.
2407    // src_pos and dst_pos must already be sign-extended (non-negative) on entry.
2408  void arraycopy_range_checks(Register src,     // source array oop
2409                              Register src_pos, // source position
2410                              Register dst,     // destination array oop
2411                              Register dst_pos, // destination position
2412                              Register length,  // length of copy
2413                              Register temp1, Register temp2,
2414                              Label& L_failed) {
2415    BLOCK_COMMENT("arraycopy_range_checks:");
2416
2417    const Register array_length = temp1;  // scratch
2418    const Register end_pos      = temp2;  // scratch
2419
2420    //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2421    __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), src);
2422    __ add(end_pos, src_pos, length);  // src_pos + length
2423    __ cmpd(CCR0, end_pos, array_length);
2424    __ bgt(CCR0, L_failed);
2425
2426    //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2427    __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), dst);
2428    __ add(end_pos, dst_pos, length);  // dst_pos + length
2429    __ cmpd(CCR0, end_pos, array_length);
2430    __ bgt(CCR0, L_failed);
2431
2432    BLOCK_COMMENT("arraycopy_range_checks done");
2433  }
2434
2435
2436  //
2437  //  Generate generic array copy stubs
2438  //
2439  //  Input:
2440  //    R3    -  src oop
2441  //    R4    -  src_pos
2442  //    R5    -  dst oop
2443  //    R6    -  dst_pos
2444  //    R7    -  element count
2445  //
2446  //  Output:
2447  //    R3 ==  0  -  success
2448  //    R3 == -1  -  need to call System.arraycopy
2449  //
2450  address generate_generic_copy(const char *name,
2451                                address entry_jbyte_arraycopy,
2452                                address entry_jshort_arraycopy,
2453                                address entry_jint_arraycopy,
2454                                address entry_oop_arraycopy,
2455                                address entry_disjoint_oop_arraycopy,
2456                                address entry_jlong_arraycopy,
2457                                address entry_checkcast_arraycopy) {
2458    Label L_failed, L_objArray;
2459
2460    // Input registers
2461    const Register src       = R3_ARG1;  // source array oop
2462    const Register src_pos   = R4_ARG2;  // source position
2463    const Register dst       = R5_ARG3;  // destination array oop
2464    const Register dst_pos   = R6_ARG4;  // destination position
2465    const Register length    = R7_ARG5;  // elements count
2466
2467    // registers used as temp
2468    const Register src_klass = R8_ARG6;  // source array klass
2469    const Register dst_klass = R9_ARG7;  // destination array klass
2470    const Register lh        = R10_ARG8; // layout handler
2471    const Register temp      = R2;
2472
2473    //__ align(CodeEntryAlignment);
2474    StubCodeMark mark(this, "StubRoutines", name);
2475    address start = __ function_entry();
2476
2477    // Bump this on entry, not on exit:
2478    //inc_counter_np(SharedRuntime::_generic_array_copy_ctr, lh, temp);
2479
2480    // In principle, the int arguments could be dirty.
2481
2482    //-----------------------------------------------------------------------
2483    // Assembler stubs will be used for this call to arraycopy
2484    // if the following conditions are met:
2485    //
2486    // (1) src and dst must not be null.
2487    // (2) src_pos must not be negative.
2488    // (3) dst_pos must not be negative.
2489    // (4) length  must not be negative.
2490    // (5) src klass and dst klass should be the same and not NULL.
2491    // (6) src and dst should be arrays.
2492    // (7) src_pos + length must not exceed length of src.
2493    // (8) dst_pos + length must not exceed length of dst.
2494    BLOCK_COMMENT("arraycopy initial argument checks");
2495
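    // Each check below sets a separate condition register field; the cror
    // sequence ORs the individual failure bits into CCR1's 'equal' bit, so a
    // single beq(CCR1, L_failed) rejects any of:
    //   src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0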
2496    __ cmpdi(CCR1, src, 0);      // if (src == NULL) return -1;
2497    __ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1;
2498    __ cmpdi(CCR5, dst, 0);      // if (dst == NULL) return -1;
2499    __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2500    __ extsw_(dst_pos, dst_pos); // if (dst_pos < 0) return -1;
2501    __ cror(CCR5, Assembler::equal, CCR0, Assembler::less);
2502    __ extsw_(length, length);   // if (length < 0) return -1;
2503    __ cror(CCR1, Assembler::equal, CCR5, Assembler::equal);
2504    __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2505    __ beq(CCR1, L_failed);
2506
2507    BLOCK_COMMENT("arraycopy argument klass checks");
2508    __ load_klass(src_klass, src);
2509    __ load_klass(dst_klass, dst);
2510
2511    // Load layout helper
2512    //
2513    //  |array_tag|     | header_size | element_type |     |log2_element_size|
2514    // 32        30    24            16              8     2                 0
2515    //
2516    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2517    //
2518
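    // Decoding sketch (illustration only; klass.hpp defines the authoritative masks):
    //   int tag   = lh >> Klass::_lh_array_tag_shift;                                    // 0x3 typeArray, 0x2 objArray
    //   int hsize = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;  // header size in bytes
    //   int l2esz = lh & Klass::_lh_log2_element_size_mask;                              // log2(element size)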
2519    int lh_offset = in_bytes(Klass::layout_helper_offset());
2520
2521    // Load the 32-bit signed layout helper value.
2522    __ lwz(lh, lh_offset, src_klass);
2523
2524    // Handle objArrays completely differently...
2525    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2526    __ load_const_optimized(temp, objArray_lh, R0);
2527    __ cmpw(CCR0, lh, temp);
2528    __ beq(CCR0, L_objArray);
2529
2530    __ cmpd(CCR5, src_klass, dst_klass);          // if (src->klass() != dst->klass()) return -1;
2531    __ cmpwi(CCR6, lh, Klass::_lh_neutral_value); // if (!src->is_Array()) return -1;
2532
2533    __ crnand(CCR5, Assembler::equal, CCR6, Assembler::less);
2534    __ beq(CCR5, L_failed);
2535
2536    // At this point, it is known to be a typeArray (array_tag 0x3).
2537#ifdef ASSERT
2538    { Label L;
2539      jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2540      __ load_const_optimized(temp, lh_prim_tag_in_place, R0);
2541      __ cmpw(CCR0, lh, temp);
2542      __ bge(CCR0, L);
2543      __ stop("must be a primitive array");
2544      __ bind(L);
2545    }
2546#endif
2547
2548    arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2549                           temp, dst_klass, L_failed);
2550
2551    // TypeArrayKlass
2552    //
2553    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2554    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2555    //
2556
2557    const Register offset = dst_klass;    // array offset
2558    const Register elsize = src_klass;    // log2 element size
2559
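    // Extract the fields from lh: rldicl rotates the header_size byte down into the
    // low bits and masks off everything above it; andi keeps only the log2_element_size bits.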
2560    __ rldicl(offset, lh, 64 - Klass::_lh_header_size_shift, 64 - exact_log2(Klass::_lh_header_size_mask + 1));
2561    __ andi(elsize, lh, Klass::_lh_log2_element_size_mask);
2562    __ add(src, offset, src);       // src array offset
2563    __ add(dst, offset, dst);       // dst array offset
2564
2565    // The following registers must be set before jumping to the corresponding copy stub.
2566    const Register from     = R3_ARG1;  // source array address
2567    const Register to       = R4_ARG2;  // destination array address
2568    const Register count    = R5_ARG3;  // elements count
2569
2570    // 'from', 'to', 'count' registers should be set in this order
2571    // since they are the same as 'src', 'src_pos', 'dst'.
2572
2573    BLOCK_COMMENT("scale indexes to element size");
2574    __ sld(src_pos, src_pos, elsize);
2575    __ sld(dst_pos, dst_pos, elsize);
2576    __ add(from, src_pos, src);  // src_addr
2577    __ add(to, dst_pos, dst);    // dst_addr
2578    __ mr(count, length);        // length
2579
2580    BLOCK_COMMENT("choose copy loop based on element size");
2581    // Using conditional branches with range 32kB.
2582    const int bo = Assembler::bcondCRbiIs1, bi = Assembler::bi0(CCR0, Assembler::equal);
2583    __ cmpwi(CCR0, elsize, 0);
2584    __ bc(bo, bi, entry_jbyte_arraycopy);
2585    __ cmpwi(CCR0, elsize, LogBytesPerShort);
2586    __ bc(bo, bi, entry_jshort_arraycopy);
2587    __ cmpwi(CCR0, elsize, LogBytesPerInt);
2588    __ bc(bo, bi, entry_jint_arraycopy);
2589#ifdef ASSERT
2590    { Label L;
2591      __ cmpwi(CCR0, elsize, LogBytesPerLong);
2592      __ beq(CCR0, L);
2593      __ stop("must be long copy, but elsize is wrong");
2594      __ bind(L);
2595    }
2596#endif
2597    __ b(entry_jlong_arraycopy);
2598
2599    // ObjArrayKlass
2600  __ bind(L_objArray);
2601    // live at this point:  src_klass, dst_klass, src[_pos], dst[_pos], length
2602
2603    Label L_disjoint_plain_copy, L_checkcast_copy;
2604    //  test array classes for subtyping
2605    __ cmpd(CCR0, src_klass, dst_klass);         // usual case is exact equality
2606    __ bne(CCR0, L_checkcast_copy);
2607
2608    // Identically typed arrays can be copied without element-wise checks.
2609    arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2610                           temp, lh, L_failed);
2611
2612    __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2613    __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2614    __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2615    __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2616    __ add(from, src_pos, src);  // src_addr
2617    __ add(to, dst_pos, dst);    // dst_addr
2618    __ mr(count, length);        // length
2619    __ b(entry_oop_arraycopy);
2620
2621  __ bind(L_checkcast_copy);
2622    // live at this point:  src_klass, dst_klass
2623    {
2624      // Before looking at dst.length, make sure dst is also an objArray.
2625      __ lwz(temp, lh_offset, dst_klass);
2626      __ cmpw(CCR0, lh, temp);
2627      __ bne(CCR0, L_failed);
2628
2629      // It is safe to examine both src.length and dst.length.
2630      arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2631                             temp, lh, L_failed);
2632
2633      // Marshal the base address arguments now, freeing registers.
2634      __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2635      __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2636      __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2637      __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2638      __ add(from, src_pos, src);  // src_addr
2639      __ add(to, dst_pos, dst);    // dst_addr
2640      __ mr(count, length);        // length
2641
2642      Register sco_temp = R6_ARG4;             // This register is free now.
2643      assert_different_registers(from, to, count, sco_temp,
2644                                 dst_klass, src_klass);
2645
2646      // Generate the type check.
2647      int sco_offset = in_bytes(Klass::super_check_offset_offset());
2648      __ lwz(sco_temp, sco_offset, dst_klass);
2649      generate_type_check(src_klass, sco_temp, dst_klass,
2650                          temp, L_disjoint_plain_copy);
2651
2652      // Fetch destination element klass from the ObjArrayKlass header.
2653      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2654
2655      // The checkcast_copy loop needs two extra arguments:
2656      __ ld(R7_ARG5, ek_offset, dst_klass);   // dest elem klass
2657      __ lwz(R6_ARG4, sco_offset, R7_ARG5);   // sco of elem klass
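      // (These become the 'ckoff' and 'ckval' arguments of the checkcast stub
      // generated by generate_checkcast_copy() above.)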
2658      __ b(entry_checkcast_arraycopy);
2659    }
2660
2661    __ bind(L_disjoint_plain_copy);
2662    __ b(entry_disjoint_oop_arraycopy);
2663
2664  __ bind(L_failed);
2665    __ li(R3_RET, -1); // return -1
2666    __ blr();
2667    return start;
2668  }
2669
2670  // Arguments for generated stub (little endian only):
2671  //   R3_ARG1   - source byte array address
2672  //   R4_ARG2   - destination byte array address
2673  //   R5_ARG3   - round key array
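  //
  // What the stub computes (pseudo-code sketch, for illustration only):
  //   state  = load_unaligned_16(from);
  //   state ^= roundKey[0];
  //   for (r = 1; r < Nr; r++)                   // Nr = 10, 12 or 14 (keylen 44/52/60)
  //     state = vcipher(state, roundKey[r]);
  //   store_unaligned_16(to, vcipherlast(state, roundKey[Nr]));
  //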
2674  address generate_aescrypt_encryptBlock() {
2675    assert(UseAES, "need AES instructions");
2676    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
2677
2678    address start = __ function_entry();
2679
2680    Label L_doLast;
2681
2682    Register from           = R3_ARG1;  // source array address
2683    Register to             = R4_ARG2;  // destination array address
2684    Register key            = R5_ARG3;  // round key array
2685
2686    Register keylen         = R8;
2687    Register temp           = R9;
2688    Register keypos         = R10;
2689    Register hex            = R11;
2690    Register fifteen        = R12;
2691
2692    VectorRegister vRet     = VR0;
2693
2694    VectorRegister vKey1    = VR1;
2695    VectorRegister vKey2    = VR2;
2696    VectorRegister vKey3    = VR3;
2697    VectorRegister vKey4    = VR4;
2698
2699    VectorRegister fromPerm = VR5;
2700    VectorRegister keyPerm  = VR6;
2701    VectorRegister toPerm   = VR7;
2702    VectorRegister fSplt    = VR8;
2703
2704    VectorRegister vTmp1    = VR9;
2705    VectorRegister vTmp2    = VR10;
2706    VectorRegister vTmp3    = VR11;
2707    VectorRegister vTmp4    = VR12;
2708
2709    VectorRegister vLow     = VR13;
2710    VectorRegister vHigh    = VR14;
2711
2712    __ li              (hex, 16);
2713    __ li              (fifteen, 15);
2714    __ vspltisb        (fSplt, 0x0f);
2715
2716    // load unaligned from[0-15] into vRet
2717    __ lvx             (vRet, from);
2718    __ lvx             (vTmp1, fifteen, from);
2719    __ lvsl            (fromPerm, from);
2720    __ vxor            (fromPerm, fromPerm, fSplt);
2721    __ vperm           (vRet, vRet, vTmp1, fromPerm);
2722
2723    // load keylen (44 or 52 or 60)
2724    __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2725
2726    // Set up keyPerm, the permute control used to merge two aligned 16-byte loads into one round key.
2727    __ lvsr            (keyPerm, key);
2728    __ vxor            (vTmp2, vTmp2, vTmp2);
2729    __ vspltisb        (vTmp2, -16);
2730    __ vrld            (keyPerm, keyPerm, vTmp2);
2731    __ vrld            (keyPerm, keyPerm, vTmp2);
2732    __ vsldoi          (keyPerm, keyPerm, keyPerm, -8);
2733
2734    // load the 1st round key to vKey1
2735    __ li              (keypos, 0);
2736    __ lvx             (vKey1, keypos, key);
2737    __ addi            (keypos, keypos, 16);
2738    __ lvx             (vTmp1, keypos, key);
2739    __ vperm           (vKey1, vTmp1, vKey1, keyPerm);
2740
2741    // 1st round
2742    __ vxor (vRet, vRet, vKey1);
2743
2744    // load the 2nd round key to vKey1
2745    __ addi            (keypos, keypos, 16);
2746    __ lvx             (vTmp2, keypos, key);
2747    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2748
2749    // load the 3rd round key to vKey2
2750    __ addi            (keypos, keypos, 16);
2751    __ lvx             (vTmp1, keypos, key);
2752    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2753
2754    // load the 4th round key to vKey3
2755    __ addi            (keypos, keypos, 16);
2756    __ lvx             (vTmp2, keypos, key);
2757    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
2758
2759    // load the 5th round key to vKey4
2760    __ addi            (keypos, keypos, 16);
2761    __ lvx             (vTmp1, keypos, key);
2762    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
2763
2764    // 2nd - 5th rounds
2765    __ vcipher (vRet, vRet, vKey1);
2766    __ vcipher (vRet, vRet, vKey2);
2767    __ vcipher (vRet, vRet, vKey3);
2768    __ vcipher (vRet, vRet, vKey4);
2769
2770    // load the 6th round key to vKey1
2771    __ addi            (keypos, keypos, 16);
2772    __ lvx             (vTmp2, keypos, key);
2773    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2774
2775    // load the 7th round key to vKey2
2776    __ addi            (keypos, keypos, 16);
2777    __ lvx             (vTmp1, keypos, key);
2778    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2779
2780    // load the 8th round key to vKey3
2781    __ addi            (keypos, keypos, 16);
2782    __ lvx             (vTmp2, keypos, key);
2783    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
2784
2785    // load the 9th round key to vKey4
2786    __ addi            (keypos, keypos, 16);
2787    __ lvx             (vTmp1, keypos, key);
2788    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
2789
2790    // 6th - 9th rounds
2791    __ vcipher (vRet, vRet, vKey1);
2792    __ vcipher (vRet, vRet, vKey2);
2793    __ vcipher (vRet, vRet, vKey3);
2794    __ vcipher (vRet, vRet, vKey4);
2795
2796    // load the 10th round key to vKey1
2797    __ addi            (keypos, keypos, 16);
2798    __ lvx             (vTmp2, keypos, key);
2799    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2800
2801    // load the 11th round key to vKey2
2802    __ addi            (keypos, keypos, 16);
2803    __ lvx             (vTmp1, keypos, key);
2804    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2805
2806    // if all round keys are loaded, skip next 4 rounds
2807    __ cmpwi           (CCR0, keylen, 44);
2808    __ beq             (CCR0, L_doLast);
2809
2810    // 10th - 11th rounds
2811    __ vcipher (vRet, vRet, vKey1);
2812    __ vcipher (vRet, vRet, vKey2);
2813
2814    // load the 12th round key to vKey1
2815    __ addi            (keypos, keypos, 16);
2816    __ lvx             (vTmp2, keypos, key);
2817    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2818
2819    // load the 13th round key to vKey2
2820    __ addi            (keypos, keypos, 16);
2821    __ lvx             (vTmp1, keypos, key);
2822    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2823
2824    // if all round keys are loaded, skip next 2 rounds
2825    __ cmpwi           (CCR0, keylen, 52);
2826    __ beq             (CCR0, L_doLast);
2827
2828    // 12th - 13th rounds
2829    __ vcipher (vRet, vRet, vKey1);
2830    __ vcipher (vRet, vRet, vKey2);
2831
2832    // load the 14th round key to vKey1
2833    __ addi            (keypos, keypos, 16);
2834    __ lvx             (vTmp2, keypos, key);
2835    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2836
2837    // load the 15th round key to vKey2
2838    __ addi            (keypos, keypos, 16);
2839    __ lvx             (vTmp1, keypos, key);
2840    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2841
2842    __ bind(L_doLast);
2843
2844    // last two rounds
2845    __ vcipher (vRet, vRet, vKey1);
2846    __ vcipherlast (vRet, vRet, vKey2);
2847
2848    __ neg             (temp, to);
2849    __ lvsr            (toPerm, temp);
2850    __ vspltisb        (vTmp2, -1);
2851    __ vxor            (vTmp1, vTmp1, vTmp1);
2852    __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
2853    __ vxor            (toPerm, toPerm, fSplt);
2854    __ lvx             (vTmp1, to);
2855    __ vperm           (vRet, vRet, vRet, toPerm);
2856    __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
2857    __ lvx             (vTmp4, fifteen, to);
2858    __ stvx            (vTmp1, to);
2859    __ vsel            (vRet, vRet, vTmp4, vTmp2);
2860    __ stvx            (vRet, fifteen, to);
2861
2862    __ blr();
2863    return start;
2864  }
2865
2866  // Arguments for generated stub (little endian only):
2867  //   R3_ARG1   - source byte array address
2868  //   R4_ARG2   - destination byte array address
2869  //   R5_ARG3   - K (key) in little endian int array
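  //
  // What the stub computes (pseudo-code sketch, for illustration only); the
  // expanded key is walked backwards and vncipher applies the inverse round:
  //   state  = load_unaligned_16(from);
  //   state ^= roundKey[Nr];                     // Nr = 10, 12 or 14 (keylen 44/52/60)
  //   for (r = Nr - 1; r > 0; r--)
  //     state = vncipher(state, roundKey[r]);
  //   store_unaligned_16(to, vncipherlast(state, roundKey[0]));
  //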
2870  address generate_aescrypt_decryptBlock() {
2871    assert(UseAES, "need AES instructions");
2872    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
2873
2874    address start = __ function_entry();
2875
2876    Label L_doLast;
2877    Label L_do44;
2878    Label L_do52;
2879    Label L_do60;
2880
2881    Register from           = R3_ARG1;  // source array address
2882    Register to             = R4_ARG2;  // destination array address
2883    Register key            = R5_ARG3;  // round key array
2884
2885    Register keylen         = R8;
2886    Register temp           = R9;
2887    Register keypos         = R10;
2888    Register hex            = R11;
2889    Register fifteen        = R12;
2890
2891    VectorRegister vRet     = VR0;
2892
2893    VectorRegister vKey1    = VR1;
2894    VectorRegister vKey2    = VR2;
2895    VectorRegister vKey3    = VR3;
2896    VectorRegister vKey4    = VR4;
2897    VectorRegister vKey5    = VR5;
2898
2899    VectorRegister fromPerm = VR6;
2900    VectorRegister keyPerm  = VR7;
2901    VectorRegister toPerm   = VR8;
2902    VectorRegister fSplt    = VR9;
2903
2904    VectorRegister vTmp1    = VR10;
2905    VectorRegister vTmp2    = VR11;
2906    VectorRegister vTmp3    = VR12;
2907    VectorRegister vTmp4    = VR13;
2908
2909    VectorRegister vLow     = VR14;
2910    VectorRegister vHigh    = VR15;
2911
2912    __ li              (hex, 16);
2913    __ li              (fifteen, 15);
2914    __ vspltisb        (fSplt, 0x0f);
2915
2916    // load unaligned from[0-15] into vRet
2917    __ lvx             (vRet, from);
2918    __ lvx             (vTmp1, fifteen, from);
2919    __ lvsl            (fromPerm, from);
2920    __ vxor            (fromPerm, fromPerm, fSplt);
2921    __ vperm           (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]
2922
2923    // load keylen (44 or 52 or 60)
2924    __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2925
2926    // Set up keyPerm, the permute control used to merge two aligned 16-byte loads into one round key.
2927    __ lvsr            (keyPerm, key);
2928    __ vxor            (vTmp2, vTmp2, vTmp2);
2929    __ vspltisb        (vTmp2, -16);
2930    __ vrld            (keyPerm, keyPerm, vTmp2);
2931    __ vrld            (keyPerm, keyPerm, vTmp2);
2932    __ vsldoi          (keyPerm, keyPerm, keyPerm, -8);
2933
2934    __ cmpwi           (CCR0, keylen, 44);
2935    __ beq             (CCR0, L_do44);
2936
2937    __ cmpwi           (CCR0, keylen, 52);
2938    __ beq             (CCR0, L_do52);
2939
2940    // load the 15th round key to vKey1
2941    __ li              (keypos, 240);
2942    __ lvx             (vTmp1, keypos, key);
2943    __ addi            (keypos, keypos, -16);
2944    __ lvx             (vTmp2, keypos, key);
2945    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2946
2947    // load the 14th round key to vKey2
2948    __ addi            (keypos, keypos, -16);
2949    __ lvx             (vTmp1, keypos, key);
2950    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
2951
2952    // load the 13th round key to vKey3
2953    __ addi            (keypos, keypos, -16);
2954    __ lvx             (vTmp2, keypos, key);
2955    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
2956
2957    // load the 12th round key to vKey4
2958    __ addi            (keypos, keypos, -16);
2959    __ lvx             (vTmp1, keypos, key);
2960    __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
2961
2962    // load the 11th round key to vKey5
2963    __ addi            (keypos, keypos, -16);
2964    __ lvx             (vTmp2, keypos, key);
2965    __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
2966
2967    // 1st - 5th rounds
2968    __ vxor            (vRet, vRet, vKey1);
2969    __ vncipher        (vRet, vRet, vKey2);
2970    __ vncipher        (vRet, vRet, vKey3);
2971    __ vncipher        (vRet, vRet, vKey4);
2972    __ vncipher        (vRet, vRet, vKey5);
2973
2974    __ b               (L_doLast);
2975
2976    __ bind            (L_do52);
2977
2978    // load the 13th round key to vKey1
2979    __ li              (keypos, 208);
2980    __ lvx             (vTmp1, keypos, key);
2981    __ addi            (keypos, keypos, -16);
2982    __ lvx             (vTmp2, keypos, key);
2983    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2984
2985    // load the 12th round key to vKey2
2986    __ addi            (keypos, keypos, -16);
2987    __ lvx             (vTmp1, keypos, key);
2988    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
2989
2990    // load the 11th round key to vKey3
2991    __ addi            (keypos, keypos, -16);
2992    __ lvx             (vTmp2, keypos, key);
2993    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
2994
2995    // 1st - 3rd rounds
2996    __ vxor            (vRet, vRet, vKey1);
2997    __ vncipher        (vRet, vRet, vKey2);
2998    __ vncipher        (vRet, vRet, vKey3);
2999
3000    __ b               (L_doLast);
3001
3002    __ bind            (L_do44);
3003
3004    // load the 11th round key to vKey1
3005    __ li              (keypos, 176);
3006    __ lvx             (vTmp1, keypos, key);
3007    __ addi            (keypos, keypos, -16);
3008    __ lvx             (vTmp2, keypos, key);
3009    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
3010
3011    // 1st round
3012    __ vxor            (vRet, vRet, vKey1);
3013
3014    __ bind            (L_doLast);
3015
3016    // load the 10th round key to vKey1
3017    __ addi            (keypos, keypos, -16);
3018    __ lvx             (vTmp1, keypos, key);
3019    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
3020
3021    // load the 9th round key to vKey2
3022    __ addi            (keypos, keypos, -16);
3023    __ lvx             (vTmp2, keypos, key);
3024    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
3025
3026    // load the 8th round key to vKey3
3027    __ addi            (keypos, keypos, -16);
3028    __ lvx             (vTmp1, keypos, key);
3029    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
3030
3031    // load the 7th round key to vKey4
3032    __ addi            (keypos, keypos, -16);
3033    __ lvx             (vTmp2, keypos, key);
3034    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
3035
3036    // load the 6th round key to vKey5
3037    __ addi            (keypos, keypos, -16);
3038    __ lvx             (vTmp1, keypos, key);
3039    __ vperm           (vKey5, vTmp2, vTmp1, keyPerm);
3040
3041    // last 10th - 6th rounds
3042    __ vncipher        (vRet, vRet, vKey1);
3043    __ vncipher        (vRet, vRet, vKey2);
3044    __ vncipher        (vRet, vRet, vKey3);
3045    __ vncipher        (vRet, vRet, vKey4);
3046    __ vncipher        (vRet, vRet, vKey5);
3047
3048    // load the 5th round key to vKey1
3049    __ addi            (keypos, keypos, -16);
3050    __ lvx             (vTmp2, keypos, key);
3051    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
3052
3053    // load the 4th round key to vKey2
3054    __ addi            (keypos, keypos, -16);
3055    __ lvx             (vTmp1, keypos, key);
3056    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
3057
3058    // load the 3rd round key to vKey3
3059    __ addi            (keypos, keypos, -16);
3060    __ lvx             (vTmp2, keypos, key);
3061    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
3062
3063    // load the 2nd round key to vKey4
3064    __ addi            (keypos, keypos, -16);
3065    __ lvx             (vTmp1, keypos, key);
3066    __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
3067
3068    // load the 1st round key to vKey5
3069    __ addi            (keypos, keypos, -16);
3070    __ lvx             (vTmp2, keypos, key);
3071    __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
3072
3073    // last 5th - 1st rounds
3074    __ vncipher        (vRet, vRet, vKey1);
3075    __ vncipher        (vRet, vRet, vKey2);
3076    __ vncipher        (vRet, vRet, vKey3);
3077    __ vncipher        (vRet, vRet, vKey4);
3078    __ vncipherlast    (vRet, vRet, vKey5);
3079
3080    __ neg             (temp, to);
3081    __ lvsr            (toPerm, temp);
3082    __ vspltisb        (vTmp2, -1);
3083    __ vxor            (vTmp1, vTmp1, vTmp1);
3084    __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
3085    __ vxor            (toPerm, toPerm, fSplt);
3086    __ lvx             (vTmp1, to);
3087    __ vperm           (vRet, vRet, vRet, toPerm);
3088    __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
3089    __ lvx             (vTmp4, fifteen, to);
3090    __ stvx            (vTmp1, to);
3091    __ vsel            (vRet, vRet, vTmp4, vTmp2);
3092    __ stvx            (vRet, fifteen, to);
3093
3094    __ blr();
3095    return start;
3096  }
3097
3098  void generate_arraycopy_stubs() {
3099    // Note: the disjoint stubs must be generated first, some of
3100    // the conjoint stubs use them.
3101
3102    // non-aligned disjoint versions
3103    StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
3104    StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
3105    StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
3106    StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
3107    StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
3108    StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
3109
3110    // aligned disjoint versions
3111    StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
3112    StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
3113    StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
3114    StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
3115    StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
3116    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
3117
3118    // non-aligned conjoint versions
3119    StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
3120    StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
3121    StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
3122    StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
3123    StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
3124    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
3125
3126    // aligned conjoint versions
3127    StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
3128    StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
3129    StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
3130    StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
3131    StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
3132    StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
3133
3134    // special/generic versions
3135    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", false);
3136    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true);
3137
3138    StubRoutines::_unsafe_arraycopy  = generate_unsafe_copy("unsafe_arraycopy",
3139                                                            STUB_ENTRY(jbyte_arraycopy),
3140                                                            STUB_ENTRY(jshort_arraycopy),
3141                                                            STUB_ENTRY(jint_arraycopy),
3142                                                            STUB_ENTRY(jlong_arraycopy));
3143    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
3144                                                             STUB_ENTRY(jbyte_arraycopy),
3145                                                             STUB_ENTRY(jshort_arraycopy),
3146                                                             STUB_ENTRY(jint_arraycopy),
3147                                                             STUB_ENTRY(oop_arraycopy),
3148                                                             STUB_ENTRY(oop_disjoint_arraycopy),
3149                                                             STUB_ENTRY(jlong_arraycopy),
3150                                                             STUB_ENTRY(checkcast_arraycopy));
3151
3152    // fill routines
3153    if (OptimizeFill) {
3154      StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
3155      StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
3156      StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
3157      StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
3158      StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
3159      StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
3160    }
3161  }
3162
3163  // Safefetch stubs.
3164  void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
3165    // safefetch signatures:
3166    //   int      SafeFetch32(int*      adr, int      errValue);
3167    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
3168    //
3169    // arguments:
3170    //   R3_ARG1 = adr
3171    //   R4_ARG2 = errValue
3172    //
3173    // result:
3174    //   R3_RET  = *adr or errValue
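    //
    //   Typical use (illustration only): callers probe possibly-unmapped memory with
    //     int v = SafeFetch32((int*) adr, 0xBAD);
    //   and treat v == 0xBAD as "address was not readable".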
3175
3176    StubCodeMark mark(this, "StubRoutines", name);
3177
3178    // Entry point, pc or function descriptor.
3179    *entry = __ function_entry();
3180
3181    // Load *adr into R4_ARG2, may fault.
3182    *fault_pc = __ pc();
3183    switch (size) {
3184      case 4:
3185        // int32_t, sign-extended
3186        __ lwa(R4_ARG2, 0, R3_ARG1);
3187        break;
3188      case 8:
3189        // int64_t
3190        __ ld(R4_ARG2, 0, R3_ARG1);
3191        break;
3192      default:
3193        ShouldNotReachHere();
3194    }
3195
3196    // return errValue or *adr
3197    *continuation_pc = __ pc();
3198    __ mr(R3_RET, R4_ARG2);
3199    __ blr();
3200  }
3201
3202  // Stub for BigInteger::multiplyToLen()
3203  //
3204  //  Arguments:
3205  //
3206  //  Input:
3207  //    R3 - x address
3208  //    R4 - x length
3209  //    R5 - y address
3210  //    R6 - y length
3211  //    R7 - z address
3212  //    R8 - z length
3213  //
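  // C-level sketch of the computation (illustration only; multiply_to_len()
  // emits the optimized PPC64 code). z[0 .. xlen+ylen-1] = x * y with 32-bit
  // limbs, most significant first, assuming z[] is zero-initialized:
  //
  //   for (int i = xlen - 1; i >= 0; i--) {
  //     julong carry = 0;
  //     for (int j = ylen - 1, k = ylen + i; j >= 0; j--, k--) {
  //       julong product = (julong)(juint)x[i] * (juint)y[j] + (juint)z[k] + carry;
  //       z[k]  = (juint)product;
  //       carry = product >> 32;
  //     }
  //     z[i] = (juint)carry;
  //   }
  //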
3214  address generate_multiplyToLen() {
3215
3216    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
3217
3218    address start = __ function_entry();
3219
3220    const Register x     = R3;
3221    const Register xlen  = R4;
3222    const Register y     = R5;
3223    const Register ylen  = R6;
3224    const Register z     = R7;
3225    const Register zlen  = R8;
3226
3227    const Register tmp1  = R2; // TOC not used.
3228    const Register tmp2  = R9;
3229    const Register tmp3  = R10;
3230    const Register tmp4  = R11;
3231    const Register tmp5  = R12;
3232
3233    // non-volatile regs
3234    const Register tmp6  = R31;
3235    const Register tmp7  = R30;
3236    const Register tmp8  = R29;
3237    const Register tmp9  = R28;
3238    const Register tmp10 = R27;
3239    const Register tmp11 = R26;
3240    const Register tmp12 = R25;
3241    const Register tmp13 = R24;
3242
3243    BLOCK_COMMENT("Entry:");
3244
3245    // C2 does not respect int to long conversion for stub calls.
3246    __ clrldi(xlen, xlen, 32);
3247    __ clrldi(ylen, ylen, 32);
3248    __ clrldi(zlen, zlen, 32);
3249
3250    // Save non-volatile regs (frameless).
3251    int current_offs = 8;
3252    __ std(R24, -current_offs, R1_SP); current_offs += 8;
3253    __ std(R25, -current_offs, R1_SP); current_offs += 8;
3254    __ std(R26, -current_offs, R1_SP); current_offs += 8;
3255    __ std(R27, -current_offs, R1_SP); current_offs += 8;
3256    __ std(R28, -current_offs, R1_SP); current_offs += 8;
3257    __ std(R29, -current_offs, R1_SP); current_offs += 8;
3258    __ std(R30, -current_offs, R1_SP); current_offs += 8;
3259    __ std(R31, -current_offs, R1_SP);
3260
3261    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5,
3262                       tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13);
3263
3264    // Restore non-volatile regs.
3265    current_offs = 8;
3266    __ ld(R24, -current_offs, R1_SP); current_offs += 8;
3267    __ ld(R25, -current_offs, R1_SP); current_offs += 8;
3268    __ ld(R26, -current_offs, R1_SP); current_offs += 8;
3269    __ ld(R27, -current_offs, R1_SP); current_offs += 8;
3270    __ ld(R28, -current_offs, R1_SP); current_offs += 8;
3271    __ ld(R29, -current_offs, R1_SP); current_offs += 8;
3272    __ ld(R30, -current_offs, R1_SP); current_offs += 8;
3273    __ ld(R31, -current_offs, R1_SP);
3274
3275    __ blr();  // Return to caller.
3276
3277    return start;
3278  }
3279
3280
3281  // Compute CRC32/CRC32C function.
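  // 'invertCRC' selects whether the incoming and outgoing crc value is
  // bit-inverted (~crc) by the kernel routine; the CRC32 flavor passes true,
  // the CRC32C flavor passes false (see the call sites below).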
3282  void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
3283
3284      // arguments to kernel_crc32:
3285      const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
3286      const Register data    = R4_ARG2;  // source byte array
3287      const Register dataLen = R5_ARG3;  // #bytes to process
3288
3289      const Register t0      = R2;
3290      const Register t1      = R7;
3291      const Register t2      = R8;
3292      const Register t3      = R9;
3293      const Register tc0     = R10;
3294      const Register tc1     = R11;
3295      const Register tc2     = R12;
3296
3297      BLOCK_COMMENT("Stub body {");
3298      assert_different_registers(crc, data, dataLen, table);
3299
3300      __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table, invertCRC);
3301
3302      BLOCK_COMMENT("return");
3303      __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
3304      __ blr();
3305
3306      BLOCK_COMMENT("} Stub body");
3307  }
3308
3309
3310  /**
3311   * Arguments:
3312   *
3313   * Inputs:
3314   *   R3_ARG1    - int   crc
3315   *   R4_ARG2    - byte* buf
3316   *   R5_ARG3    - int   length (of buffer)
3317   *
3318   * scratch:
3319   *   R2, R6-R12
3320   *
3321   * Output:
3322   *   R3_RET     - int   crc result
3323   */
3324  // Compute CRC32 function.
3325  address generate_CRC32_updateBytes(const char* name) {
3326    __ align(CodeEntryAlignment);
3327    StubCodeMark mark(this, "StubRoutines", name);
3328    address start = __ function_entry();  // Remember stub start address (is rtn value).
3329
3330    const Register table   = R6;       // crc table address
3331
3332#ifdef VM_LITTLE_ENDIAN
3333    // arguments to kernel_crc32:
3334    const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
3335    const Register data    = R4_ARG2;  // source byte array
3336    const Register dataLen = R5_ARG3;  // #bytes to process
3337
3338    if (VM_Version::has_vpmsumb()) {
3339      const Register constants    = R2;  // constants address
3340      const Register bconstants   = R8;  // barret table address
3341
3342      const Register t0      = R9;
3343      const Register t1      = R10;
3344      const Register t2      = R11;
3345      const Register t3      = R12;
3346      const Register t4      = R7;
3347
3348      BLOCK_COMMENT("Stub body {");
3349      assert_different_registers(crc, data, dataLen, table);
3350
3351      StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
3352      StubRoutines::ppc64::generate_load_crc_constants_addr(_masm, constants);
3353      StubRoutines::ppc64::generate_load_crc_barret_constants_addr(_masm, bconstants);
3354
3355      __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, true);
3356
3357      BLOCK_COMMENT("return");
3358      __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
3359      __ blr();
3360
3361      BLOCK_COMMENT("} Stub body");
3362    } else
3363#endif
3364    {
3365      StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);
3366      generate_CRC_updateBytes(name, table, true);
3367    }
3368
3369    return start;
3370  }
3371
3372
3373  /**
3374   * Arguments:
3375   *
3376   * Inputs:
3377   *   R3_ARG1    - int   crc
3378   *   R4_ARG2    - byte* buf
3379   *   R5_ARG3    - int   length (of buffer)
3380   *
3381   * scratch:
3382   *   R2, R6-R12
3383   *
3384   * Output:
3385   *   R3_RET     - int   crc result
3386   */
3387  // Compute CRC32C function.
3388  address generate_CRC32C_updateBytes(const char* name) {
3389    __ align(CodeEntryAlignment);
3390    StubCodeMark mark(this, "StubRoutines", name);
3391    address start = __ function_entry();  // Remember stub start address (is rtn value).
3392
3393    const Register table   = R6;       // crc table address
3394
3395#if 0   // no vector support yet for CRC32C
3396#ifdef VM_LITTLE_ENDIAN
3397    // arguments to kernel_crc32:
3398    const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
3399    const Register data    = R4_ARG2;  // source byte array
3400    const Register dataLen = R5_ARG3;  // #bytes to process
3401
3402    if (VM_Version::has_vpmsumb()) {
3403      const Register constants    = R2;  // constants address
3404      const Register bconstants   = R8;  // barret table address
3405
3406      const Register t0      = R9;
3407      const Register t1      = R10;
3408      const Register t2      = R11;
3409      const Register t3      = R12;
3410      const Register t4      = R7;
3411
3412      BLOCK_COMMENT("Stub body {");
3413      assert_different_registers(crc, data, dataLen, table);
3414
3415      StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
3416      StubRoutines::ppc64::generate_load_crc32c_constants_addr(_masm, constants);
3417      StubRoutines::ppc64::generate_load_crc32c_barret_constants_addr(_masm, bconstants);
3418
3419      __ kernel_crc32_1word_vpmsumd(crc, data, dataLen, table, constants, bconstants, t0, t1, t2, t3, t4, false);
3420
3421      BLOCK_COMMENT("return");
3422      __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
3423      __ blr();
3424
3425      BLOCK_COMMENT("} Stub body");
3426    } else
3427#endif
3428#endif
3429    {
3430      StubRoutines::ppc64::generate_load_crc32c_table_addr(_masm, table);
3431      generate_CRC_updateBytes(name, table, false);
3432    }
3433
3434    return start;
3435  }
3436
3437
3438  // Initialization
3439  void generate_initial() {
3440    // Generates all stubs and initializes the entry points
3441
3442    // Entry points that exist in all platforms.
3443    // Note: This is code that could be shared among different platforms - however the
3444    // benefit seems to be smaller than the disadvantage of having a
3445    // much more complicated generator structure. See also comment in
3446    // stubRoutines.hpp.
3447
3448    StubRoutines::_forward_exception_entry          = generate_forward_exception();
3449    StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
3450    StubRoutines::_catch_exception_entry            = generate_catch_exception();
3451
3452    // Build this early so it's available for the interpreter.
3453    StubRoutines::_throw_StackOverflowError_entry   =
3454      generate_throw_exception("StackOverflowError throw_exception",
3455                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
3456    StubRoutines::_throw_delayed_StackOverflowError_entry =
3457      generate_throw_exception("delayed StackOverflowError throw_exception",
3458                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);
3459
3460    // CRC32 Intrinsics.
3461    if (UseCRC32Intrinsics) {
3462      StubRoutines::_crc_table_adr    = (address)StubRoutines::ppc64::_crc_table;
3463      StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
3464    }
3465
3466    // CRC32C Intrinsics.
3467    if (UseCRC32CIntrinsics) {
3468      StubRoutines::_crc32c_table_addr = (address)StubRoutines::ppc64::_crc32c_table;
3469      StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
3470    }
3471  }
3472
3473  void generate_all() {
3474    // Generates all stubs and initializes the entry points
3475
3476    // These entry points require SharedInfo::stack0 to be set up in
3477    // non-core builds
3478    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
3479    // Handle IncompatibleClassChangeError in itable stubs.
3480    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
3481    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
3482
3483    // support for verify_oop (must happen after universe_init)
3484    StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();
3485
3486    // arraycopy stubs used by compilers
3487    generate_arraycopy_stubs();
3488
3489    // Safefetch stubs.
3490    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
3491                                                       &StubRoutines::_safefetch32_fault_pc,
3492                                                       &StubRoutines::_safefetch32_continuation_pc);
3493    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
3494                                                       &StubRoutines::_safefetchN_fault_pc,
3495                                                       &StubRoutines::_safefetchN_continuation_pc);
3496
3497#ifdef COMPILER2
3498    if (UseMultiplyToLenIntrinsic) {
3499      StubRoutines::_multiplyToLen = generate_multiplyToLen();
3500    }
3501#endif
3502
3503    if (UseMontgomeryMultiplyIntrinsic) {
3504      StubRoutines::_montgomeryMultiply
3505        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
3506    }
3507    if (UseMontgomerySquareIntrinsic) {
3508      StubRoutines::_montgomerySquare
3509        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
3510    }
3511
3512    if (UseAESIntrinsics) {
3513      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
3514      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
3515    }
3516
3517  }
3518
3519 public:
3520  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3521    // replace the standard masm with a special one:
3522    _masm = new MacroAssembler(code);
3523    if (all) {
3524      generate_all();
3525    } else {
3526      generate_initial();
3527    }
3528  }
3529};
3530
3531void StubGenerator_generate(CodeBuffer* code, bool all) {
3532  StubGenerator g(code, all);
3533}
3534