/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
#include "oops/method.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

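// Under the ELFv1 ABI a stub "address" is really a pointer to a function
// descriptor (entry point, TOC, environment), so STUB_ENTRY has to read the
// entry point out of the descriptor; the ELFv2 ABI uses plain entry points.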
#if defined(ABI_ELFv2)
#define STUB_ENTRY(name) StubRoutines::name()
#else
#define STUB_ENTRY(name) ((FunctionDescriptor*)StubRoutines::name())->entry()
#endif

class StubGenerator: public StubCodeGenerator {
 private:

  // Call stubs are used to call Java from C
  //
  // Arguments:
  //
  //   R3  - call wrapper address     : address
  //   R4  - result                   : intptr_t*
  //   R5  - result type              : BasicType
  //   R6  - method                   : Method
  //   R7  - frame mgr entry point    : address
  //   R8  - parameter block          : intptr_t*
  //   R9  - parameter count in words : int
  //   R10 - thread                   : Thread*
  //
  address generate_call_stub(address& return_address) {
    // Set up a new C frame, copy Java arguments, call frame manager or
    // native_entry, and process result.

    StubCodeMark mark(this, "StubRoutines", "call_stub");

    address start = __ function_entry();

    // some sanity checks
    assert((sizeof(frame::abi_minframe) % 16) == 0,           "unaligned");
    assert((sizeof(frame::abi_reg_args) % 16) == 0,           "unaligned");
    assert((sizeof(frame::spill_nonvolatiles) % 16) == 0,     "unaligned");
    assert((sizeof(frame::parent_ijava_frame_abi) % 16) == 0, "unaligned");
    assert((sizeof(frame::entry_frame_locals) % 16) == 0,     "unaligned");

    Register r_arg_call_wrapper_addr        = R3;
    Register r_arg_result_addr              = R4;
    Register r_arg_result_type              = R5;
    Register r_arg_method                   = R6;
    Register r_arg_entry                    = R7;
    Register r_arg_thread                   = R10;

    Register r_temp                         = R24;
    Register r_top_of_arguments_addr        = R25;
    Register r_entryframe_fp                = R26;

    {
      // Stack on entry to call_stub:
      //
      //      F1      [C_FRAME]
      //              ...

      Register r_arg_argument_addr          = R8;
      Register r_arg_argument_count         = R9;
      Register r_frame_alignment_in_bytes   = R27;
      Register r_argument_addr              = R28;
      Register r_argumentcopy_addr          = R29;
      Register r_argument_size_in_bytes     = R30;
      Register r_frame_size                 = R23;

      Label arguments_copied;

      // Save LR/CR to caller's C_FRAME.
      __ save_LR_CR(R0);

      // Zero extend arg_argument_count.
      __ clrldi(r_arg_argument_count, r_arg_argument_count, 32);

      // Save non-volatile GPRs to ENTRY_FRAME (not yet pushed, but it's safe).
      __ save_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));

      // Keep copy of our frame pointer (caller's SP).
      __ mr(r_entryframe_fp, R1_SP);

      BLOCK_COMMENT("Push ENTRY_FRAME including arguments");
      // Push ENTRY_FRAME including arguments:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...

      // calculate frame size

      // unaligned size of arguments
      __ sldi(r_argument_size_in_bytes,
                  r_arg_argument_count, Interpreter::logStackElementSize);
      // arguments alignment (max 1 slot)
      // FIXME: use round_to() here
      __ andi_(r_frame_alignment_in_bytes, r_arg_argument_count, 1);
      __ sldi(r_frame_alignment_in_bytes,
              r_frame_alignment_in_bytes, Interpreter::logStackElementSize);
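      // Stack slots are 8 bytes wide here, so padding one extra slot whenever
      // the argument count is odd keeps the pushed frame a multiple of
      // 16 bytes, matching the alignment asserts above.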

      // size = unaligned size of arguments + top abi's size
      __ addi(r_frame_size, r_argument_size_in_bytes,
              frame::top_ijava_frame_abi_size);
      // size += arguments alignment
      __ add(r_frame_size,
             r_frame_size, r_frame_alignment_in_bytes);
      // size += size of call_stub locals
      __ addi(r_frame_size,
              r_frame_size, frame::entry_frame_locals_size);

      // push ENTRY_FRAME
      __ push_frame(r_frame_size, r_temp);

      // initialize call_stub locals (step 1)
      __ std(r_arg_call_wrapper_addr,
             _entry_frame_locals_neg(call_wrapper_address), r_entryframe_fp);
      __ std(r_arg_result_addr,
             _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ std(r_arg_result_type,
             _entry_frame_locals_neg(result_type), r_entryframe_fp);
      // we will save arguments_tos_address later


      BLOCK_COMMENT("Copy Java arguments");
      // copy Java arguments

      // Calculate top_of_arguments_addr which will be R17_tos (not prepushed) later.
      // FIXME: why not simply use SP+frame::top_ijava_frame_size?
      __ addi(r_top_of_arguments_addr,
              R1_SP, frame::top_ijava_frame_abi_size);
      __ add(r_top_of_arguments_addr,
             r_top_of_arguments_addr, r_frame_alignment_in_bytes);

      // any arguments to copy?
      __ cmpdi(CCR0, r_arg_argument_count, 0);
      __ beq(CCR0, arguments_copied);

      // prepare loop and copy arguments in reverse order
      {
        // init CTR with arg_argument_count
        __ mtctr(r_arg_argument_count);

        // let r_argumentcopy_addr point to last outgoing Java argument
        __ mr(r_argumentcopy_addr, r_top_of_arguments_addr);

        // let r_argument_addr point to last incoming java argument
        __ add(r_argument_addr,
                   r_arg_argument_addr, r_argument_size_in_bytes);
        __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);

        // now loop while CTR > 0 and copy arguments
        {
          Label next_argument;
          __ bind(next_argument);

          __ ld(r_temp, 0, r_argument_addr);
          // argument_addr--;
          __ addi(r_argument_addr, r_argument_addr, -BytesPerWord);
          __ std(r_temp, 0, r_argumentcopy_addr);
          // argumentcopy_addr++;
          __ addi(r_argumentcopy_addr, r_argumentcopy_addr, BytesPerWord);

          __ bdnz(next_argument);
        }
      }

      // Arguments copied, continue.
      __ bind(arguments_copied);
    }

    {
      BLOCK_COMMENT("Call frame manager or native entry.");
      // Call frame manager or native entry.
      Register r_new_arg_entry = R14;
      assert_different_registers(r_new_arg_entry, r_top_of_arguments_addr,
                                 r_arg_method, r_arg_thread);

      __ mr(r_new_arg_entry, r_arg_entry);

      // Register state on entry to frame manager / native entry:
      //
      //   tos         -  intptr_t*    sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
      //   R19_method  -  Method
      //   R16_thread  -  JavaThread*

      // Tos must point to last argument - element_size.
      const Register tos = R15_esp;

      __ addi(tos, r_top_of_arguments_addr, -Interpreter::stackElementSize);

      // initialize call_stub locals (step 2)
      // now save tos as arguments_tos_address
      __ std(tos, _entry_frame_locals_neg(arguments_tos_address), r_entryframe_fp);

      // load argument registers for call
      __ mr(R19_method, r_arg_method);
      __ mr(R16_thread, r_arg_thread);
      assert(tos != r_arg_method, "trashed r_arg_method");
      assert(tos != r_arg_thread && R19_method != r_arg_thread, "trashed r_arg_thread");

      // Set R15_prev_state to 0 for simplifying checks in callee.
      __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R11_scratch1);
      // Stack on entry to frame manager / native entry:
      //
      //      F0      [TOP_IJAVA_FRAME_ABI]
      //              alignment (optional)
      //              [outgoing Java arguments]
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //

      // global toc register
      __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R11_scratch1);
      // Remember the senderSP so the interpreter can pop c2i arguments off of the stack
      // when called via a c2i.

      // Pass initial_caller_sp to framemanager.
      __ mr(R21_tmp1, R1_SP);

      // Do a light-weight C-call here, r_new_arg_entry holds the address
      // of the interpreter entry point (frame manager or native entry)
      // and save runtime-value of LR in return_address.
      assert(r_new_arg_entry != tos && r_new_arg_entry != R19_method && r_new_arg_entry != R16_thread,
             "trashed r_new_arg_entry");
      return_address = __ call_stub(r_new_arg_entry);
    }

    {
      BLOCK_COMMENT("Returned from frame manager or native entry.");
      // Returned from frame manager or native entry.
      // Now pop frame, process result, and return to caller.

      // Stack on exit from frame manager / native entry:
      //
      //      F0      [ABI]
      //              ...
      //              [ENTRY_FRAME_LOCALS]
      //      F1      [C_FRAME]
      //              ...
      //
      // Just pop the topmost frame ...
      //

      Label ret_is_object;
      Label ret_is_long;
      Label ret_is_float;
      Label ret_is_double;

      Register r_entryframe_fp = R30;
      Register r_lr            = R7_ARG5;
      Register r_cr            = R8_ARG6;

      // Reload some volatile registers which we've spilled before the call
      // to frame manager / native entry.
      // Access all locals via frame pointer, because we know nothing about
      // the topmost frame's size.
      __ ld(r_entryframe_fp, _abi(callers_sp), R1_SP);
      assert_different_registers(r_entryframe_fp, R3_RET, r_arg_result_addr, r_arg_result_type, r_cr, r_lr);
      __ ld(r_arg_result_addr,
            _entry_frame_locals_neg(result_address), r_entryframe_fp);
      __ ld(r_arg_result_type,
            _entry_frame_locals_neg(result_type), r_entryframe_fp);
      __ ld(r_cr, _abi(cr), r_entryframe_fp);
      __ ld(r_lr, _abi(lr), r_entryframe_fp);

      // pop frame and restore non-volatiles, LR and CR
      __ mr(R1_SP, r_entryframe_fp);
      __ mtcr(r_cr);
      __ mtlr(r_lr);

      // Store result depending on type. Everything that is not
      // T_OBJECT, T_LONG, T_FLOAT, or T_DOUBLE is treated as T_INT.
      __ cmpwi(CCR0, r_arg_result_type, T_OBJECT);
      __ cmpwi(CCR1, r_arg_result_type, T_LONG);
      __ cmpwi(CCR5, r_arg_result_type, T_FLOAT);
      __ cmpwi(CCR6, r_arg_result_type, T_DOUBLE);
324
325      // restore non-volatile registers
326      __ restore_nonvolatile_gprs(R1_SP, _spill_nonvolatiles_neg(r14));
327
328
329      // Stack on exit from call_stub:
330      //
331      //      0       [C_FRAME]
332      //              ...
333      //
334      //  no call_stub frames left.
335
336      // All non-volatiles have been restored at this point!!
337      assert(R3_RET == R3, "R3_RET should be R3");
338
339      __ beq(CCR0, ret_is_object);
340      __ beq(CCR1, ret_is_long);
341      __ beq(CCR5, ret_is_float);
342      __ beq(CCR6, ret_is_double);
343
344      // default:
345      __ stw(R3_RET, 0, r_arg_result_addr);
346      __ blr(); // return to caller
347
348      // case T_OBJECT:
349      __ bind(ret_is_object);
350      __ std(R3_RET, 0, r_arg_result_addr);
351      __ blr(); // return to caller
352
353      // case T_LONG:
354      __ bind(ret_is_long);
355      __ std(R3_RET, 0, r_arg_result_addr);
356      __ blr(); // return to caller
357
358      // case T_FLOAT:
359      __ bind(ret_is_float);
360      __ stfs(F1_RET, 0, r_arg_result_addr);
361      __ blr(); // return to caller
362
363      // case T_DOUBLE:
364      __ bind(ret_is_double);
365      __ stfd(F1_RET, 0, r_arg_result_addr);
366      __ blr(); // return to caller
367    }
368
369    return start;
370  }
371
372  // Return point for a Java call if there's an exception thrown in
373  // Java code.  The exception is caught and transformed into a
374  // pending exception stored in JavaThread that can be tested from
375  // within the VM.
376  //
377  address generate_catch_exception() {
378    StubCodeMark mark(this, "StubRoutines", "catch_exception");
379
380    address start = __ pc();
381
382    // Registers alive
383    //
384    //  R16_thread
385    //  R3_ARG1 - address of pending exception
386    //  R4_ARG2 - return address in call stub
387
388    const Register exception_file = R21_tmp1;
389    const Register exception_line = R22_tmp2;
390
391    __ load_const(exception_file, (void*)__FILE__);
392    __ load_const(exception_line, (void*)__LINE__);
393
394    __ std(R3_ARG1, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
395    // store into `char *'
396    __ std(exception_file, in_bytes(JavaThread::exception_file_offset()), R16_thread);
397    // store into `int'
398    __ stw(exception_line, in_bytes(JavaThread::exception_line_offset()), R16_thread);
399
400    // complete return to VM
401    assert(StubRoutines::_call_stub_return_address != NULL, "must have been generated before");
402
403    __ mtlr(R4_ARG2);
404    // continue in call stub
405    __ blr();
406
407    return start;
408  }
409
410  // Continuation point for runtime calls returning with a pending
411  // exception.  The pending exception check happened in the runtime
412  // or native call stub.  The pending exception in Thread is
413  // converted into a Java-level exception.
414  //
415  // Read:
416  //
417  //   LR:     The pc the runtime library callee wants to return to.
418  //           Since the exception occurred in the callee, the return pc
419  //           from the point of view of Java is the exception pc.
420  //   thread: Needed for method handles.
421  //
422  // Invalidate:
423  //
424  //   volatile registers (except below).
425  //
426  // Update:
427  //
428  //   R4_ARG2: exception
429  //
430  // (LR is unchanged and is live out).
431  //
432  address generate_forward_exception() {
433    StubCodeMark mark(this, "StubRoutines", "forward_exception");
434    address start = __ pc();
435
436#if !defined(PRODUCT)
437    if (VerifyOops) {
438      // Get pending exception oop.
439      __ ld(R3_ARG1,
440                in_bytes(Thread::pending_exception_offset()),
441                R16_thread);
442      // Make sure that this code is only executed if there is a pending exception.
443      {
444        Label L;
445        __ cmpdi(CCR0, R3_ARG1, 0);
446        __ bne(CCR0, L);
447        __ stop("StubRoutines::forward exception: no pending exception (1)");
448        __ bind(L);
449      }
450      __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
451    }
452#endif
453
454    // Save LR/CR and copy exception pc (LR) into R4_ARG2.
455    __ save_LR_CR(R4_ARG2);
456    __ push_frame_reg_args(0, R0);
457    // Find exception handler.
458    __ call_VM_leaf(CAST_FROM_FN_PTR(address,
459                     SharedRuntime::exception_handler_for_return_address),
460                    R16_thread,
461                    R4_ARG2);
462    // Copy handler's address.
463    __ mtctr(R3_RET);
464    __ pop_frame();
465    __ restore_LR_CR(R0);
466
467    // Set up the arguments for the exception handler:
468    //  - R3_ARG1: exception oop
469    //  - R4_ARG2: exception pc.
470
471    // Load pending exception oop.
472    __ ld(R3_ARG1,
473              in_bytes(Thread::pending_exception_offset()),
474              R16_thread);
475
476    // The exception pc is the return address in the caller.
477    // Must load it into R4_ARG2.
478    __ mflr(R4_ARG2);
479
480#ifdef ASSERT
481    // Make sure exception is set.
482    {
483      Label L;
484      __ cmpdi(CCR0, R3_ARG1, 0);
485      __ bne(CCR0, L);
486      __ stop("StubRoutines::forward exception: no pending exception (2)");
487      __ bind(L);
488    }
489#endif
490
491    // Clear the pending exception.
492    __ li(R0, 0);
493    __ std(R0,
494               in_bytes(Thread::pending_exception_offset()),
495               R16_thread);
496    // Jump to exception handler.
497    __ bctr();
498
499    return start;
500  }

#undef __
#define __ masm->
  // Continuation point for throwing of implicit exceptions that are
  // not handled in the current activation. Fabricates an exception
  // oop and initiates normal exception dispatching in this
  // frame. Only callee-saved registers are preserved (through the
  // normal register window / RegisterMap handling).  If the compiler
  // needs all registers to be preserved between the fault point and
  // the exception handler then it must assume responsibility for that
  // in AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other
  // implicit exceptions (e.g., NullPointerException or
  // AbstractMethodError on entry) are either at call sites or
  // otherwise assume that stack unwinding will be initiated, so
  // caller saved registers were assumed volatile in the compiler.
  //
  // Note that we generate only this stub into a RuntimeStub, because
  // it needs to be properly traversed and ignored during GC, so we
  // change the meaning of the "__" macro within this method.
  //
  // Note: the routine set_pc_not_at_call_for_caller in
  // SharedRuntime.cpp requires that this code be generated into a
  // RuntimeStub.
  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc,
                                   Register arg1 = noreg, Register arg2 = noreg) {
    CodeBuffer code(name, 1024 DEBUG_ONLY(+ 512), 0);
    MacroAssembler* masm = new MacroAssembler(&code);

    OopMapSet* oop_maps  = new OopMapSet();
    int frame_size_in_bytes = frame::abi_reg_args_size;
    OopMap* map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);

    address start = __ pc();

    __ save_LR_CR(R11_scratch1);

    // Push a frame.
    __ push_frame_reg_args(0, R11_scratch1);

    address frame_complete_pc = __ pc();

    if (restore_saved_exception_pc) {
      __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
    }

    // Note that we always have a runtime stub frame on the top of
    // stack by this point. Remember the offset of the instruction
    // whose address will be moved to R11_scratch1.
    address gc_map_pc = __ get_PC_trash_LR(R11_scratch1);

    __ set_last_Java_frame(/*sp*/R1_SP, /*pc*/R11_scratch1);

    __ mr(R3_ARG1, R16_thread);
    if (arg1 != noreg) {
      __ mr(R4_ARG2, arg1);
    }
    if (arg2 != noreg) {
      __ mr(R5_ARG3, arg2);
    }
#if defined(ABI_ELFv2)
    __ call_c(runtime_entry, relocInfo::none);
#else
    __ call_c(CAST_FROM_FN_PTR(FunctionDescriptor*, runtime_entry), relocInfo::none);
#endif

    // Set an oopmap for the call site.
    oop_maps->add_gc_map((int)(gc_map_pc - start), map);

    __ reset_last_Java_frame();

#ifdef ASSERT
    // Make sure that this code is only executed if there is a pending
    // exception.
    {
      Label L;
      __ ld(R0,
                in_bytes(Thread::pending_exception_offset()),
                R16_thread);
      __ cmpdi(CCR0, R0, 0);
      __ bne(CCR0, L);
      __ stop("StubRoutines::throw_exception: no pending exception");
      __ bind(L);
    }
#endif

    // Pop frame.
    __ pop_frame();

    __ restore_LR_CR(R11_scratch1);

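    // Tail-call the shared forward_exception stub: it picks up the pending
    // exception from the thread and dispatches to the Java-level handler.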
    __ load_const(R11_scratch1, StubRoutines::forward_exception_entry());
    __ mtctr(R11_scratch1);
    __ bctr();

    // Create runtime stub with OopMap.
    RuntimeStub* stub =
      RuntimeStub::new_runtime_stub(name, &code,
                                    /*frame_complete=*/ (int)(frame_complete_pc - start),
                                    frame_size_in_bytes/wordSize,
                                    oop_maps,
                                    false);
    return stub->entry_point();
  }
#undef __
#define __ _masm->

  //  Generate G1 pre-write barrier for array.
  //
  //  Input:
  //     from     - register containing src address (only needed for spilling)
  //     to       - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  Kills:
  //     nothing
  //
  void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1,
                                       Register preserve1 = noreg, Register preserve2 = noreg) {
    BarrierSet* const bs = Universe::heap()->barrier_set();
    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        // With G1, don't generate the call if we statically know that the target is uninitialized.
        if (!dest_uninitialized) {
          int spill_slots = 3;
          if (preserve1 != noreg) { spill_slots++; }
          if (preserve2 != noreg) { spill_slots++; }
          const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
          Label filtered;

          // Is marking active?
          if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
            __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
          } else {
            guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
            __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
          }
          __ cmpdi(CCR0, Rtmp1, 0);
          __ beq(CCR0, filtered);
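          // The SATB pre-barrier is only needed while concurrent marking is
          // active; if the thread's SATB queue is marked inactive, the runtime
          // call below is skipped entirely.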

          __ save_LR_CR(R0);
          __ push_frame(frame_size, R0);
          int slot_nr = 0;
          __ std(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
          __ std(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
          __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
          if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
          if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }

          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);

          slot_nr = 0;
          __ ld(from,  frame_size - (++slot_nr) * wordSize, R1_SP);
          __ ld(to,    frame_size - (++slot_nr) * wordSize, R1_SP);
          __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
          if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
          if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
          __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
          __ restore_LR_CR(R0);

          __ bind(filtered);
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }

  //  Generate CMS/G1 post-write barrier for array.
  //
  //  Input:
  //     addr     - register containing starting address
  //     count    - register containing element count
  //     tmp      - scratch register
  //
  //  The input registers and R0 are overwritten.
  //
  void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, Register preserve = noreg) {
    BarrierSet* const bs = Universe::heap()->barrier_set();

    switch (bs->kind()) {
      case BarrierSet::G1SATBCTLogging:
        {
          int spill_slots = (preserve != noreg) ? 1 : 0;
          const int frame_size = align_size_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);

          __ save_LR_CR(R0);
          __ push_frame(frame_size, R0);
          if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
          __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
          if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
          __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
          __ restore_LR_CR(R0);
        }
        break;
      case BarrierSet::CardTableForRS:
      case BarrierSet::CardTableExtension:
        {
          Label Lskip_loop, Lstore_loop;
          if (UseConcMarkSweepGC) {
            // TODO PPC port: contribute optimization / requires shared changes
            __ release();
          }

          CardTableModRefBS* const ct = barrier_set_cast<CardTableModRefBS>(bs);
          assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
          assert_different_registers(addr, count, tmp);

          __ sldi(count, count, LogBytesPerHeapOop);
          __ addi(count, count, -BytesPerHeapOop);
          __ add(count, addr, count);
          // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
          __ srdi(addr, addr, CardTableModRefBS::card_shift);
          __ srdi(count, count, CardTableModRefBS::card_shift);
          __ subf(count, addr, count);
          assert_different_registers(R0, addr, count, tmp);
          __ load_const(tmp, (address)ct->byte_map_base);
          __ addic_(count, count, 1);
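          // Now 'addr' holds the index of the first card and 'count' the number
          // of cards spanned by the stored oops (last card - first card + 1);
          // the loop below stores zero into each of those card table bytes.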
          __ beq(CCR0, Lskip_loop);
          __ li(R0, 0);
          __ mtctr(count);
          // Byte store loop
          __ bind(Lstore_loop);
          __ stbx(R0, tmp, addr);
          __ addi(addr, addr, 1);
          __ bdnz(Lstore_loop);
          __ bind(Lskip_loop);
        }
      break;
      case BarrierSet::ModRef:
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Support for void zero_words_aligned8(HeapWord* to, size_t count)
  //
  // Arguments:
  //   to:
  //   count:
  //
  // Destroys:
  //
  address generate_zero_words_aligned8() {
    StubCodeMark mark(this, "StubRoutines", "zero_words_aligned8");

    // Implemented as in ClearArray.
    address start = __ function_entry();

    Register base_ptr_reg   = R3_ARG1; // tohw (needs to be 8b aligned)
    Register cnt_dwords_reg = R4_ARG2; // count (in dwords)
    Register tmp1_reg       = R5_ARG3;
    Register tmp2_reg       = R6_ARG4;
    Register zero_reg       = R7_ARG5;

    // Procedure for large arrays (uses data cache block zero instruction).
    Label dwloop, fast, fastloop, restloop, lastdword, done;
    int cl_size = VM_Version::L1_data_cache_line_size();
    int cl_dwords = cl_size >> 3;
    int cl_dwordaddr_bits = exact_log2(cl_dwords);
    int min_dcbz = 2; // Needs to be positive, apply dcbz only to at least min_dcbz cache lines.
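    // Strategy: clear dword-by-dword up to the next cache-line boundary, then
    // clear whole cache lines with dcbz, and finish the remainder with a
    // two-dword loop plus an optional trailing dword.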

    // Clear up to 128byte boundary if long enough, dword_cnt=(16-(base>>3))%16.
    __ dcbtst(base_ptr_reg);                    // Indicate write access to first cache line ...
    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if number of dwords is even.
    __ srdi_(tmp1_reg, cnt_dwords_reg, 1);      // number of double dwords
    __ load_const_optimized(zero_reg, 0L);      // Use as zero register.

    __ cmpdi(CCR1, tmp2_reg, 0);                // cnt_dwords even?
    __ beq(CCR0, lastdword);                    // size <= 1
    __ mtctr(tmp1_reg);                         // Speculatively preload counter for rest loop (>0).
    __ cmpdi(CCR0, cnt_dwords_reg, (min_dcbz+1)*cl_dwords-1); // Big enough to ensure >=min_dcbz cache lines are included?
    __ neg(tmp1_reg, base_ptr_reg);             // bit 0..58: bogus, bit 57..60: (16-(base>>3))%16, bit 61..63: 000

    __ blt(CCR0, restloop);                     // Too small. (<31=(2*cl_dwords)-1 is sufficient, but bigger performs better.)
    __ rldicl_(tmp1_reg, tmp1_reg, 64-3, 64-cl_dwordaddr_bits); // Extract number of dwords to 128byte boundary=(16-(base>>3))%16.

    __ beq(CCR0, fast);                         // already 128byte aligned
    __ mtctr(tmp1_reg);                         // Set ctr to hit 128byte boundary (0<ctr<cnt).
    __ subf(cnt_dwords_reg, tmp1_reg, cnt_dwords_reg); // rest (>0 since size>=256-8)

    // Clear in first cache line dword-by-dword if not already 128byte aligned.
    __ bind(dwloop);
      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, 8);
    __ bdnz(dwloop);

    // clear 128byte blocks
    __ bind(fast);
    __ srdi(tmp1_reg, cnt_dwords_reg, cl_dwordaddr_bits); // loop count for 128byte loop (>0 since size>=256-8)
    __ andi(tmp2_reg, cnt_dwords_reg, 1);       // to check if rest even

    __ mtctr(tmp1_reg);                         // load counter
    __ cmpdi(CCR1, tmp2_reg, 0);                // rest even?
    __ rldicl_(tmp1_reg, cnt_dwords_reg, 63, 65-cl_dwordaddr_bits); // rest in double dwords

    __ bind(fastloop);
      __ dcbz(base_ptr_reg);                    // Clear 128byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, cl_size);
    __ bdnz(fastloop);

    //__ dcbtst(base_ptr_reg);                  // Indicate write access to last cache line.
    __ beq(CCR0, lastdword);                    // rest<=1
    __ mtctr(tmp1_reg);                         // load counter

    // Clear rest.
    __ bind(restloop);
      __ std(zero_reg, 0, base_ptr_reg);        // Clear 8byte aligned block.
      __ std(zero_reg, 8, base_ptr_reg);        // Clear 8byte aligned block.
      __ addi(base_ptr_reg, base_ptr_reg, 16);
    __ bdnz(restloop);

    __ bind(lastdword);
    __ beq(CCR1, done);
    __ std(zero_reg, 0, base_ptr_reg);
    __ bind(done);
    __ blr();                                   // return

    return start;
  }

#if !defined(PRODUCT)
  // Wrapper which calls oopDesc::is_oop_or_null()
  // Only called by MacroAssembler::verify_oop
  static void verify_oop_helper(const char* message, oop o) {
    if (!o->is_oop_or_null()) {
      fatal("%s", message);
    }
    ++ StubRoutines::_verify_oop_count;
  }
#endif

  // Return address of code to be called from code generated by
  // MacroAssembler::verify_oop.
  //
  // Don't generate, rather use C++ code.
  address generate_verify_oop() {
    // this is actually a `FunctionDescriptor*'.
    address start = 0;

#if !defined(PRODUCT)
    start = CAST_FROM_FN_PTR(address, verify_oop_helper);
#endif

    return start;
  }

  // Fairer handling of safepoints for native methods.
  //
  // Generate code which reads from the polling page. This special handling is needed as the
  // linux-ppc64 kernel before 2.6.6 doesn't set si_addr on some segfaults in 64bit mode
  // (cf. http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.6), especially when we try
  // to read from the safepoint polling page.
  address generate_load_from_poll() {
    StubCodeMark mark(this, "StubRoutines", "generate_load_from_poll");
    address start = __ function_entry();
    __ unimplemented("StubRoutines::verify_oop", 95);  // TODO PPC port
    return start;
  }

  // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
  //
  // The code is implemented (ported from sparc) as we believe it benefits JVM98, however
  // tracing (-XX:+TraceOptimizeFill) shows the intrinsic replacement doesn't happen at all!
  //
  // Source code in function is_range_check_if() shows that OptimizeFill relaxed the condition
  // for turning on loop predication optimization, and hence the behavior of "array range check"
  // and "loop invariant check" could be influenced, which potentially boosted JVM98.
  //
  // Generate stub for disjoint short fill. If "aligned" is true, the
  // "to" address is assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //   to:    R3_ARG1
  //   value: R4_ARG2
  //   count: R5_ARG3 treated as signed
  //
  address generate_fill(BasicType t, bool aligned, const char* name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();

    const Register to    = R3_ARG1;   // destination array address
    const Register value = R4_ARG2;   // fill value
    const Register count = R5_ARG3;   // elements count
    const Register temp  = R6_ARG4;   // temp register

    //assert_clean_int(count, O3);    // Make sure 'count' is clean int.

    Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
    Label L_fill_2_bytes, L_fill_4_bytes, L_fill_elements, L_fill_32_bytes;

    int shift = -1;
    switch (t) {
      case T_BYTE:
        shift = 2;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 8, 48);     // 8 bit -> 16 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        break;
      case T_SHORT:
        shift = 1;
        // Clone bytes (zero extend not needed because store instructions below ignore high order bytes).
        __ rldimi(value, value, 16, 32);    // 16 bit -> 32 bit
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_elements);
        break;
      case T_INT:
        shift = 0;
        __ cmpdi(CCR0, count, 2<<shift);    // Short arrays (< 8 bytes) fill by element.
        __ blt(CCR0, L_fill_4_bytes);
        break;
      default: ShouldNotReachHere();
    }

    if (!aligned && (t == T_BYTE || t == T_SHORT)) {
      // Align destination address at 4 bytes address boundary.
      if (t == T_BYTE) {
        // One byte misalignment happens only for byte arrays.
        __ andi_(temp, to, 1);
        __ beq(CCR0, L_skip_align1);
        __ stb(value, 0, to);
        __ addi(to, to, 1);
        __ addi(count, count, -1);
        __ bind(L_skip_align1);
      }
      // Two bytes misalignment happens only for byte and short (char) arrays.
      __ andi_(temp, to, 2);
      __ beq(CCR0, L_skip_align2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ addi(count, count, -(1 << (shift - 1)));
      __ bind(L_skip_align2);
    }

    if (!aligned) {
      // Align to 8 bytes, we know we are 4 byte aligned to start.
      __ andi_(temp, to, 7);
      __ beq(CCR0, L_fill_32_bytes);
      __ stw(value, 0, to);
      __ addi(to, to, 4);
      __ addi(count, count, -(1 << shift));
      __ bind(L_fill_32_bytes);
    }

    __ li(temp, 8<<shift);                  // Prepare for 32 byte loop.
    // Clone bytes int->long as above.
    __ rldimi(value, value, 32, 0);         // 32 bit -> 64 bit
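    // 'value' now holds the fill pattern replicated across all 8 bytes
    // (e.g. filling bytes with 0x41 yields 0x4141414141414141), so the main
    // loop can use 8-byte stores.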

    Label L_check_fill_8_bytes;
    // Fill 32-byte chunks.
    __ subf_(count, temp, count);
    __ blt(CCR0, L_check_fill_8_bytes);

    Label L_fill_32_bytes_loop;
    __ align(32);
    __ bind(L_fill_32_bytes_loop);

    __ std(value, 0, to);
    __ std(value, 8, to);
    __ subf_(count, temp, count);           // Update count.
    __ std(value, 16, to);
    __ std(value, 24, to);

    __ addi(to, to, 32);
    __ bge(CCR0, L_fill_32_bytes_loop);

    __ bind(L_check_fill_8_bytes);
    __ add_(count, temp, count);
    __ beq(CCR0, L_exit);
    __ addic_(count, count, -(2 << shift));
    __ blt(CCR0, L_fill_4_bytes);

    //
    // Length is too short, just fill 8 bytes at a time.
    //
    Label L_fill_8_bytes_loop;
    __ bind(L_fill_8_bytes_loop);
    __ std(value, 0, to);
    __ addic_(count, count, -(2 << shift));
    __ addi(to, to, 8);
    __ bge(CCR0, L_fill_8_bytes_loop);

    // Fill trailing 4 bytes.
    __ bind(L_fill_4_bytes);
    __ andi_(temp, count, 1<<shift);
    __ beq(CCR0, L_fill_2_bytes);

    __ stw(value, 0, to);
    if (t == T_BYTE || t == T_SHORT) {
      __ addi(to, to, 4);
      // Fill trailing 2 bytes.
      __ bind(L_fill_2_bytes);
      __ andi_(temp, count, 1<<(shift-1));
      __ beq(CCR0, L_fill_byte);
      __ sth(value, 0, to);
      if (t == T_BYTE) {
        __ addi(to, to, 2);
        // Fill trailing byte.
        __ bind(L_fill_byte);
        __ andi_(count, count, 1);
        __ beq(CCR0, L_exit);
        __ stb(value, 0, to);
      } else {
        __ bind(L_fill_byte);
      }
    } else {
      __ bind(L_fill_2_bytes);
    }
    __ bind(L_exit);
    __ blr();

    // Handle copies less than 8 bytes. Int is handled elsewhere.
    if (t == T_BYTE) {
      __ bind(L_fill_elements);
      Label L_fill_2, L_fill_4;
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ stb(value, 0, to);
      __ addi(to, to, 1);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_fill_4);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ addi(to, to, 2);
      __ bind(L_fill_4);
      __ andi_(temp, count, 4);
      __ beq(CCR0, L_exit);
      __ stb(value, 0, to);
      __ stb(value, 1, to);
      __ stb(value, 2, to);
      __ stb(value, 3, to);
      __ blr();
    }

    if (t == T_SHORT) {
      Label L_fill_2;
      __ bind(L_fill_elements);
      __ andi_(temp, count, 1);
      __ beq(CCR0, L_fill_2);
      __ sth(value, 0, to);
      __ addi(to, to, 2);
      __ bind(L_fill_2);
      __ andi_(temp, count, 2);
      __ beq(CCR0, L_exit);
      __ sth(value, 0, to);
      __ sth(value, 2, to);
      __ blr();
    }
    return start;
  }

  inline void assert_positive_int(Register count) {
#ifdef ASSERT
    __ srdi_(R0, count, 31);
    __ asm_assert_eq("missing zero extend", 0xAFFE);
#endif
  }

  // Generate overlap test for array copy stubs.
  //
  // Input:
  //   R3_ARG1    -  from
  //   R4_ARG2    -  to
  //   R5_ARG3    -  element count
  //
  void array_overlap_test(address no_overlap_target, int log2_elem_size) {
    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;

    assert_positive_int(R5_ARG3);

    __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
    __ sldi(tmp2, R5_ARG3, log2_elem_size); // size in bytes
    __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
    __ cmpld(CCR1, tmp1, tmp2);
    __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
    // Overlaps if Src before dst and distance smaller than size.
    // Branch to forward copy routine otherwise (within range of 32kB).
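    // Example with byte elements: from=0x1000, to=0x1004, count=16:
    // from < to and distance (4) < size (16), so the regions overlap
    // destructively and we fall through to the backward copy below.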
    __ bc(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::less), no_overlap_target);

    // need to copy backwards
  }

  // The guideline in the implementations of generate_disjoint_xxx_copy
  // (xxx=byte,short,int,long,oop) is to copy as many elements as possible with
  // single instructions, but to avoid alignment interrupts (see subsequent
  // comment). Furthermore, we try to minimize misaligned accesses, even
  // though they cause no alignment interrupt.
  //
  // In Big-Endian mode, the PowerPC architecture requires implementations to
  // handle automatically misaligned integer halfword and word accesses,
  // word-aligned integer doubleword accesses, and word-aligned floating-point
  // accesses. Other accesses may or may not generate an Alignment interrupt
  // depending on the implementation.
  // Alignment interrupt handling may require on the order of hundreds of cycles,
  // so every effort should be made to avoid misaligned memory values.
  //
  //
  // Generate stub for disjoint byte copy.  If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //
  address generate_disjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    assert_positive_int(R5_ARG3);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8, l_9;
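    // Overview: short arrays (<= 17 elements) go straight to the 4-byte loop
    // and the single-byte tail; longer arrays are aligned to 4 and, when src
    // and dst agree mod 8, to 8 bytes, then copied 32 bytes per iteration.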

    // Don't try anything fancy if arrays don't have many elements.
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 17);
    __ ble(CCR0, l_6); // copy 4 at a time

    if (!aligned) {
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp1, 3);
      __ bne(CCR0, l_6); // If arrays don't have the same alignment mod 4, do 4 element copy.

      // Copy elements if necessary to align to 4 bytes.
      __ neg(tmp1, R3_ARG1); // Compute distance to alignment boundary.
      __ andi_(tmp1, tmp1, 3);
      __ beq(CCR0, l_2);

      __ subf(R5_ARG3, tmp1, R5_ARG3);
      __ bind(l_9);
      __ lbz(tmp2, 0, R3_ARG1);
      __ addic_(tmp1, tmp1, -1);
      __ stb(tmp2, 0, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 1);
      __ addi(R4_ARG2, R4_ARG2, 1);
      __ bne(CCR0, l_9);

      __ bind(l_2);
    }

    // copy 8 elements at a time
    __ xorr(tmp2, R3_ARG1, R4_ARG2); // skip if src & dest have differing alignment mod 8
    __ andi_(tmp1, tmp2, 7);
    __ bne(CCR0, l_7); // not same alignment -> to or from is aligned -> copy 8

    // copy a 2-element word if necessary to align to 8 bytes
    __ andi_(R0, R3_ARG1, 7);
    __ beq(CCR0, l_7);

    __ lwzx(tmp2, R3_ARG1, tmp3);
    __ addi(R5_ARG3, R5_ARG3, -4);
    __ stwx(tmp2, R4_ARG2, tmp3);
    { // FasterArrayCopy
      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }
    __ bind(l_7);

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 31);
      __ ble(CCR0, l_6); // copy 4 at a time if less than 32 elements remain

      __ srdi(tmp1, R5_ARG3, 5);
      __ andi_(R5_ARG3, R5_ARG3, 31);
      __ mtctr(tmp1);

      __ bind(l_8);
      // Use unrolled version for mass copying (copy 32 elements a time)
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
      __ ld(tmp1, 0, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp4, 24, R3_ARG1);
      __ std(tmp1, 0, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp4, 24, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 32);
      __ addi(R4_ARG2, R4_ARG2, 32);
      __ bdnz(l_8);
    }

    __ bind(l_6);

    // copy 4 elements at a time
    __ cmpwi(CCR0, R5_ARG3, 4);
    __ blt(CCR0, l_1);
    __ srdi(tmp1, R5_ARG3, 2);
    __ mtctr(tmp1); // is > 0
    __ andi_(R5_ARG3, R5_ARG3, 3);

    { // FasterArrayCopy
      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);
      __ bind(l_3);
      __ lwzu(tmp2, 4, R3_ARG1);
      __ stwu(tmp2, 4, R4_ARG2);
      __ bdnz(l_3);
      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }

    // do single element copy
    __ bind(l_1);
    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_4);

    { // FasterArrayCopy
      __ mtctr(R5_ARG3);
      __ addi(R3_ARG1, R3_ARG1, -1);
      __ addi(R4_ARG2, R4_ARG2, -1);

      __ bind(l_5);
      __ lbzu(tmp2, 1, R3_ARG1);
      __ stbu(tmp2, 1, R4_ARG2);
      __ bdnz(l_5);
    }

    __ bind(l_4);
    __ li(R3_RET, 0); // return 0
    __ blr();

    return start;
  }

  // Generate stub for conjoint byte copy.  If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //      count: R5_ARG3 treated as signed
  //
  address generate_conjoint_byte_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();
    assert_positive_int(R5_ARG3);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;

    address nooverlap_target = aligned ?
      STUB_ENTRY(arrayof_jbyte_disjoint_arraycopy) :
      STUB_ENTRY(jbyte_disjoint_arraycopy);

    array_overlap_test(nooverlap_target, 0);
    // Do reverse copy. We assume the case of actual overlap is rare enough
    // that we don't have to optimize it.
    Label l_1, l_2;

    __ b(l_2);
    __ bind(l_1);
    __ stbx(tmp1, R4_ARG2, R5_ARG3);
    __ bind(l_2);
    __ addic_(R5_ARG3, R5_ARG3, -1);
    __ lbzx(tmp1, R3_ARG1, R5_ARG3);
    __ bge(CCR0, l_1);

    __ li(R3_RET, 0); // return 0
    __ blr();

    return start;
  }

  // Generate stub for disjoint short copy.  If "aligned" is true, the
  // "from" and "to" addresses are assumed to be heapword aligned.
  //
  // Arguments for generated stub:
  //      from:  R3_ARG1
  //      to:    R4_ARG2
  //  elm.count: R5_ARG3 treated as signed
  //
  // Strategy for aligned==true:
  //
  //  If length <= 9:
  //     1. copy 2 elements at a time (l_6)
  //     2. copy last element if original element count was odd (l_1)
  //
  //  If length > 9:
  //     1. copy 4 elements at a time until less than 4 elements are left (l_7)
  //     2. copy 2 elements at a time until less than 2 elements are left (l_6)
  //     3. copy last element if one was left in step 2. (l_1)
  //
  //
  // Strategy for aligned==false:
  //
  //  If length <= 9: same as aligned==true case, but NOTE: load/stores
  //                  can be unaligned (see comment below)
  //
  //  If length > 9:
  //     1. continue with step 6. if the alignment of from and to mod 4
  //        is different.
  //     2. align from and to to 4 bytes by copying 1 element if necessary
  //     3. at l_2 from and to are 4 byte aligned; continue with
  //        5. if they cannot be aligned to 8 bytes because they have
  //        got different alignment mod 8.
  //     4. at this point we know that both, from and to, have the same
  //        alignment mod 8, now copy one element if necessary to get
  //        8 byte alignment of from and to.
  //     5. copy 4 elements at a time until less than 4 elements are
  //        left; depending on step 3. all load/stores are aligned or
  //        either all loads or all stores are unaligned.
  //     6. copy 2 elements at a time until less than 2 elements are
  //        left (l_6); arriving here from step 1., there is a chance
  //        that all accesses are unaligned.
  //     7. copy last element if one was left in step 6. (l_1)
  //
  //  There are unaligned data accesses using integer load/store
  //  instructions in this stub. POWER allows such accesses.
  //
  //  According to the manuals (PowerISA_V2.06_PUBLIC, Book II,
  //  Chapter 2: Effect of Operand Placement on Performance) unaligned
  //  integer load/stores have good performance. Only unaligned
  //  floating point load/stores can have poor performance.
  //
  //  TODO:
  //
  //  1. check if aligning the backbranch target of loops is beneficial
  //
  address generate_disjoint_short_copy(bool aligned, const char * name) {
    StubCodeMark mark(this, "StubRoutines", name);

    Register tmp1 = R6_ARG4;
    Register tmp2 = R7_ARG5;
    Register tmp3 = R8_ARG6;
    Register tmp4 = R9_ARG7;

    address start = __ function_entry();
    assert_positive_int(R5_ARG3);

    Label l_1, l_2, l_3, l_4, l_5, l_6, l_7, l_8;

    // don't try anything fancy if arrays don't have many elements
    __ li(tmp3, 0);
    __ cmpwi(CCR0, R5_ARG3, 9);
    __ ble(CCR0, l_6); // copy 2 at a time

    if (!aligned) {
      __ xorr(tmp1, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp1, 3);
      __ bne(CCR0, l_6); // if arrays don't have the same alignment mod 4, do 2 element copy

      // At this point it is guaranteed that both, from and to have the same alignment mod 4.

      // Copy 1 element if necessary to align to 4 bytes.
      __ andi_(tmp1, R3_ARG1, 3);
      __ beq(CCR0, l_2);

      __ lhz(tmp2, 0, R3_ARG1);
      __ addi(R3_ARG1, R3_ARG1, 2);
      __ sth(tmp2, 0, R4_ARG2);
      __ addi(R4_ARG2, R4_ARG2, 2);
      __ addi(R5_ARG3, R5_ARG3, -1);
      __ bind(l_2);

      // At this point the positions of both, from and to, are at least 4 byte aligned.

      // Copy 4 elements at a time.
      // Align to 8 bytes, but only if both, from and to, have same alignment mod 8.
      __ xorr(tmp2, R3_ARG1, R4_ARG2);
      __ andi_(tmp1, tmp2, 7);
      __ bne(CCR0, l_7); // not same alignment mod 8 -> copy 4, either from or to will be unaligned

      // Copy a 2-element word if necessary to align to 8 bytes.
      __ andi_(R0, R3_ARG1, 7);
      __ beq(CCR0, l_7);

      __ lwzx(tmp2, R3_ARG1, tmp3);
      __ addi(R5_ARG3, R5_ARG3, -2);
      __ stwx(tmp2, R4_ARG2, tmp3);
      { // FasterArrayCopy
        __ addi(R3_ARG1, R3_ARG1, 4);
        __ addi(R4_ARG2, R4_ARG2, 4);
      }
    }

    __ bind(l_7);

    // Copy 4 elements at a time; either the loads or the stores can
    // be unaligned if aligned == false.

    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 15);
      __ ble(CCR0, l_6); // copy 2 at a time if less than 16 elements remain

      __ srdi(tmp1, R5_ARG3, 4);
      __ andi_(R5_ARG3, R5_ARG3, 15);
      __ mtctr(tmp1);

      __ bind(l_8);
      // Use unrolled version for mass copying (copy 16 elements a time).
      // Load feeding store gets zero latency on Power6, however not on Power5.
      // Therefore, the following sequence is made for the good of both.
      __ ld(tmp1, 0, R3_ARG1);
      __ ld(tmp2, 8, R3_ARG1);
      __ ld(tmp3, 16, R3_ARG1);
      __ ld(tmp4, 24, R3_ARG1);
      __ std(tmp1, 0, R4_ARG2);
      __ std(tmp2, 8, R4_ARG2);
      __ std(tmp3, 16, R4_ARG2);
      __ std(tmp4, 24, R4_ARG2);
      __ addi(R3_ARG1, R3_ARG1, 32);
      __ addi(R4_ARG2, R4_ARG2, 32);
      __ bdnz(l_8);
    }
    __ bind(l_6);

    // copy 2 elements at a time
    { // FasterArrayCopy
      __ cmpwi(CCR0, R5_ARG3, 2);
      __ blt(CCR0, l_1);
      __ srdi(tmp1, R5_ARG3, 1);
      __ andi_(R5_ARG3, R5_ARG3, 1);

      __ addi(R3_ARG1, R3_ARG1, -4);
      __ addi(R4_ARG2, R4_ARG2, -4);
      __ mtctr(tmp1);

      __ bind(l_3);
      __ lwzu(tmp2, 4, R3_ARG1);
      __ stwu(tmp2, 4, R4_ARG2);
      __ bdnz(l_3);

      __ addi(R3_ARG1, R3_ARG1, 4);
      __ addi(R4_ARG2, R4_ARG2, 4);
    }

    // do single element copy
    __ bind(l_1);
    __ cmpwi(CCR0, R5_ARG3, 0);
    __ beq(CCR0, l_4);

    { // FasterArrayCopy
      __ mtctr(R5_ARG3);
      __ addi(R3_ARG1, R3_ARG1, -2);
      __ addi(R4_ARG2, R4_ARG2, -2);

      __ bind(l_5);
      __ lhzu(tmp2, 2, R3_ARG1);
      __ sthu(tmp2, 2, R4_ARG2);
      __ bdnz(l_5);
    }
    __ bind(l_4);
    __ li(R3_RET, 0); // return 0
    __ blr();

    return start;
  }
1465
1466  // Generate stub for conjoint short copy.  If "aligned" is true, the
1467  // "from" and "to" addresses are assumed to be heapword aligned.
1468  //
1469  // Arguments for generated stub:
1470  //      from:  R3_ARG1
1471  //      to:    R4_ARG2
1472  //      count: R5_ARG3 treated as signed
1473  //
1474  address generate_conjoint_short_copy(bool aligned, const char * name) {
1475    StubCodeMark mark(this, "StubRoutines", name);
1476    address start = __ function_entry();
1477    assert_positive_int(R5_ARG3);
1478
1479    Register tmp1 = R6_ARG4;
1480    Register tmp2 = R7_ARG5;
1481    Register tmp3 = R8_ARG6;
1482
1483    address nooverlap_target = aligned ?
1484      STUB_ENTRY(arrayof_jshort_disjoint_arraycopy) :
1485      STUB_ENTRY(jshort_disjoint_arraycopy);
1486
1487    array_overlap_test(nooverlap_target, 1);
1488
1489    Label l_1, l_2;
1490    __ sldi(tmp1, R5_ARG3, 1);
1491    __ b(l_2);
1492    __ bind(l_1);
1493    __ sthx(tmp2, R4_ARG2, tmp1);
1494    __ bind(l_2);
1495    __ addic_(tmp1, tmp1, -2);
1496    __ lhzx(tmp2, R3_ARG1, tmp1);
1497    __ bge(CCR0, l_1);
1498
1499    __ li(R3_RET, 0); // return 0
1500    __ blr();
1501
1502    return start;
1503  }
1504
1505  // Generate core code for disjoint int copy (and oop copy on 32-bit).  If "aligned"
1506  // is true, the "from" and "to" addresses are assumed to be heapword aligned.
1507  //
1508  // Arguments:
1509  //      from:  R3_ARG1
1510  //      to:    R4_ARG2
1511  //      count: R5_ARG3 treated as signed
1512  //
1513  void generate_disjoint_int_copy_core(bool aligned) {
1514    Register tmp1 = R6_ARG4;
1515    Register tmp2 = R7_ARG5;
1516    Register tmp3 = R8_ARG6;
1517    Register tmp4 = R0;
1518
1519    Label l_1, l_2, l_3, l_4, l_5, l_6;
1520
1521    // for short arrays, just do single element copy
1522    __ li(tmp3, 0);
1523    __ cmpwi(CCR0, R5_ARG3, 5);
1524    __ ble(CCR0, l_2);
1525
1526    if (!aligned) {
1527      // check if arrays have the same alignment mod 8.
1528      __ xorr(tmp1, R3_ARG1, R4_ARG2);
1529      __ andi_(R0, tmp1, 7);
1530      // Not the same alignment, but ld and std just need to be 4 byte aligned.
1531      __ bne(CCR0, l_4); // alignments differ mod 8 -> skip alignment step, 4 byte alignment is sufficient
1532
1533      // copy 1 element to align to and from on an 8 byte boundary
1534      __ andi_(R0, R3_ARG1, 7);
1535      __ beq(CCR0, l_4);
1536
1537      __ lwzx(tmp2, R3_ARG1, tmp3);
1538      __ addi(R5_ARG3, R5_ARG3, -1);
1539      __ stwx(tmp2, R4_ARG2, tmp3);
1540      { // FasterArrayCopy
1541        __ addi(R3_ARG1, R3_ARG1, 4);
1542        __ addi(R4_ARG2, R4_ARG2, 4);
1543      }
1544      __ bind(l_4);
1545    }
1546
1547    { // FasterArrayCopy
1548      __ cmpwi(CCR0, R5_ARG3, 7);
1549      __ ble(CCR0, l_2); // copy 1 at a time if less than 8 elements remain
1550
1551      __ srdi(tmp1, R5_ARG3, 3);
1552      __ andi_(R5_ARG3, R5_ARG3, 7);
1553      __ mtctr(tmp1);
1554
1555      __ bind(l_6);
1556      // Use unrolled version for mass copying (copy 8 elements at a time).
1557      // Load feeding store gets zero latency on Power6, however not on Power5.
1558      // Therefore, the following sequence is made for the good of both.
1559      __ ld(tmp1, 0, R3_ARG1);
1560      __ ld(tmp2, 8, R3_ARG1);
1561      __ ld(tmp3, 16, R3_ARG1);
1562      __ ld(tmp4, 24, R3_ARG1);
1563      __ std(tmp1, 0, R4_ARG2);
1564      __ std(tmp2, 8, R4_ARG2);
1565      __ std(tmp3, 16, R4_ARG2);
1566      __ std(tmp4, 24, R4_ARG2);
1567      __ addi(R3_ARG1, R3_ARG1, 32);
1568      __ addi(R4_ARG2, R4_ARG2, 32);
1569      __ bdnz(l_6);
1570    }
1571
1572    // copy 1 element at a time
1573    __ bind(l_2);
1574    __ cmpwi(CCR0, R5_ARG3, 0);
1575    __ beq(CCR0, l_1);
1576
1577    { // FasterArrayCopy
1578      __ mtctr(R5_ARG3);
1579      __ addi(R3_ARG1, R3_ARG1, -4);
1580      __ addi(R4_ARG2, R4_ARG2, -4);
1581
1582      __ bind(l_3);
1583      __ lwzu(tmp2, 4, R3_ARG1);
1584      __ stwu(tmp2, 4, R4_ARG2);
1585      __ bdnz(l_3);
1586    }
1587
1588    __ bind(l_1);
1589    return;
1590  }
1591
1592  // Generate stub for disjoint int copy.  If "aligned" is true, the
1593  // "from" and "to" addresses are assumed to be heapword aligned.
1594  //
1595  // Arguments for generated stub:
1596  //      from:  R3_ARG1
1597  //      to:    R4_ARG2
1598  //      count: R5_ARG3 treated as signed
1599  //
1600  address generate_disjoint_int_copy(bool aligned, const char * name) {
1601    StubCodeMark mark(this, "StubRoutines", name);
1602    address start = __ function_entry();
1603    assert_positive_int(R5_ARG3);
1604    generate_disjoint_int_copy_core(aligned);
1605    __ li(R3_RET, 0); // return 0
1606    __ blr();
1607    return start;
1608  }
1609
1610  // Generate core code for conjoint int copy (and oop copy on
1611  // 32-bit).  If "aligned" is true, the "from" and "to" addresses
1612  // are assumed to be heapword aligned.
1613  //
1614  // Arguments:
1615  //      from:  R3_ARG1
1616  //      to:    R4_ARG2
1617  //      count: R5_ARG3 treated as signed
1618  //
1619  void generate_conjoint_int_copy_core(bool aligned) {
1620    // Do reverse copy.  We assume the case of actual overlap is rare enough
1621    // that we don't have to optimize it.
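    //
    // Editorial sketch (illustration only): copying from the highest address
    // downwards makes an overlapping forward-overlap copy safe, roughly
    //
    //   for (long i = count - 1; i >= 0; i--) to[i] = from[i];
    //
    // The code below does the same, but first walks in unrolled 32 byte
    // (8 element) steps and then finishes element by element.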
1622
1623    Label l_1, l_2, l_3, l_4, l_5, l_6;
1624
1625    Register tmp1 = R6_ARG4;
1626    Register tmp2 = R7_ARG5;
1627    Register tmp3 = R8_ARG6;
1628    Register tmp4 = R0;
1629
1630    { // FasterArrayCopy
1631      __ cmpwi(CCR0, R5_ARG3, 0);
1632      __ beq(CCR0, l_6);
1633
1634      __ sldi(R5_ARG3, R5_ARG3, 2);
1635      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1636      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1637      __ srdi(R5_ARG3, R5_ARG3, 2);
1638
1639      __ cmpwi(CCR0, R5_ARG3, 7);
1640      __ ble(CCR0, l_5); // copy 1 at a time if less than 8 elements remain
1641
1642      __ srdi(tmp1, R5_ARG3, 3);
1643      __ andi(R5_ARG3, R5_ARG3, 7);
1644      __ mtctr(tmp1);
1645
1646      __ bind(l_4);
1647      // Use unrolled version for mass copying (copy 8 elements at a time).
1648      // Load feeding store gets zero latency on Power6, however not on Power5.
1649      // Therefore, the following sequence is made for the good of both.
1650      __ addi(R3_ARG1, R3_ARG1, -32);
1651      __ addi(R4_ARG2, R4_ARG2, -32);
1652      __ ld(tmp4, 24, R3_ARG1);
1653      __ ld(tmp3, 16, R3_ARG1);
1654      __ ld(tmp2, 8, R3_ARG1);
1655      __ ld(tmp1, 0, R3_ARG1);
1656      __ std(tmp4, 24, R4_ARG2);
1657      __ std(tmp3, 16, R4_ARG2);
1658      __ std(tmp2, 8, R4_ARG2);
1659      __ std(tmp1, 0, R4_ARG2);
1660      __ bdnz(l_4);
1661
1662      __ cmpwi(CCR0, R5_ARG3, 0);
1663      __ beq(CCR0, l_6);
1664
1665      __ bind(l_5);
1666      __ mtctr(R5_ARG3);
1667      __ bind(l_3);
1668      __ lwz(R0, -4, R3_ARG1);
1669      __ stw(R0, -4, R4_ARG2);
1670      __ addi(R3_ARG1, R3_ARG1, -4);
1671      __ addi(R4_ARG2, R4_ARG2, -4);
1672      __ bdnz(l_3);
1673
1674      __ bind(l_6);
1675    }
1676  }
1677
1678  // Generate stub for conjoint int copy.  If "aligned" is true, the
1679  // "from" and "to" addresses are assumed to be heapword aligned.
1680  //
1681  // Arguments for generated stub:
1682  //      from:  R3_ARG1
1683  //      to:    R4_ARG2
1684  //      count: R5_ARG3 treated as signed
1685  //
1686  address generate_conjoint_int_copy(bool aligned, const char * name) {
1687    StubCodeMark mark(this, "StubRoutines", name);
1688    address start = __ function_entry();
1689    assert_positive_int(R5_ARG3);
1690    address nooverlap_target = aligned ?
1691      STUB_ENTRY(arrayof_jint_disjoint_arraycopy) :
1692      STUB_ENTRY(jint_disjoint_arraycopy);
1693
1694    array_overlap_test(nooverlap_target, 2);
1695
1696    generate_conjoint_int_copy_core(aligned);
1697
1698    __ li(R3_RET, 0); // return 0
1699    __ blr();
1700
1701    return start;
1702  }
1703
1704  // Generate core code for disjoint long copy (and oop copy on
1705  // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1706  // are assumed to be heapword aligned.
1707  //
1708  // Arguments:
1709  //      from:  R3_ARG1
1710  //      to:    R4_ARG2
1711  //      count: R5_ARG3 treated as signed
1712  //
1713  void generate_disjoint_long_copy_core(bool aligned) {
1714    Register tmp1 = R6_ARG4;
1715    Register tmp2 = R7_ARG5;
1716    Register tmp3 = R8_ARG6;
1717    Register tmp4 = R0;
1718
1719    Label l_1, l_2, l_3, l_4;
1720
1721    { // FasterArrayCopy
1722      __ cmpwi(CCR0, R5_ARG3, 3);
1723      __ ble(CCR0, l_3); // copy 1 at a time if less than 4 elements remain
1724
1725      __ srdi(tmp1, R5_ARG3, 2);
1726      __ andi_(R5_ARG3, R5_ARG3, 3);
1727      __ mtctr(tmp1);
1728
1729      __ bind(l_4);
1730      // Use unrolled version for mass copying (copy 4 elements at a time).
1731      // Load feeding store gets zero latency on Power6, however not on Power5.
1732      // Therefore, the following sequence is made for the good of both.
1733      __ ld(tmp1, 0, R3_ARG1);
1734      __ ld(tmp2, 8, R3_ARG1);
1735      __ ld(tmp3, 16, R3_ARG1);
1736      __ ld(tmp4, 24, R3_ARG1);
1737      __ std(tmp1, 0, R4_ARG2);
1738      __ std(tmp2, 8, R4_ARG2);
1739      __ std(tmp3, 16, R4_ARG2);
1740      __ std(tmp4, 24, R4_ARG2);
1741      __ addi(R3_ARG1, R3_ARG1, 32);
1742      __ addi(R4_ARG2, R4_ARG2, 32);
1743      __ bdnz(l_4);
1744    }
1745
1746    // copy 1 element at a time
1747    __ bind(l_3);
1748    __ cmpwi(CCR0, R5_ARG3, 0);
1749    __ beq(CCR0, l_1);
1750
1751    { // FasterArrayCopy
1752      __ mtctr(R5_ARG3);
1753      __ addi(R3_ARG1, R3_ARG1, -8);
1754      __ addi(R4_ARG2, R4_ARG2, -8);
1755
1756      __ bind(l_2);
1757      __ ldu(R0, 8, R3_ARG1);
1758      __ stdu(R0, 8, R4_ARG2);
1759      __ bdnz(l_2);
1760
1761    }
1762    __ bind(l_1);
1763  }
1764
1765  // Generate stub for disjoint long copy.  If "aligned" is true, the
1766  // "from" and "to" addresses are assumed to be heapword aligned.
1767  //
1768  // Arguments for generated stub:
1769  //      from:  R3_ARG1
1770  //      to:    R4_ARG2
1771  //      count: R5_ARG3 treated as signed
1772  //
1773  address generate_disjoint_long_copy(bool aligned, const char * name) {
1774    StubCodeMark mark(this, "StubRoutines", name);
1775    address start = __ function_entry();
1776    assert_positive_int(R5_ARG3);
1777    generate_disjoint_long_copy_core(aligned);
1778    __ li(R3_RET, 0); // return 0
1779    __ blr();
1780
1781    return start;
1782  }
1783
1784  // Generate core code for conjoint long copy (and oop copy on
1785  // 64-bit).  If "aligned" is true, the "from" and "to" addresses
1786  // are assumed to be heapword aligned.
1787  //
1788  // Arguments:
1789  //      from:  R3_ARG1
1790  //      to:    R4_ARG2
1791  //      count: R5_ARG3 treated as signed
1792  //
1793  void generate_conjoint_long_copy_core(bool aligned) {
1794    Register tmp1 = R6_ARG4;
1795    Register tmp2 = R7_ARG5;
1796    Register tmp3 = R8_ARG6;
1797    Register tmp4 = R0;
1798
1799    Label l_1, l_2, l_3, l_4, l_5;
1800
1801    __ cmpwi(CCR0, R5_ARG3, 0);
1802    __ beq(CCR0, l_1);
1803
1804    { // FasterArrayCopy
1805      __ sldi(R5_ARG3, R5_ARG3, 3);
1806      __ add(R3_ARG1, R3_ARG1, R5_ARG3);
1807      __ add(R4_ARG2, R4_ARG2, R5_ARG3);
1808      __ srdi(R5_ARG3, R5_ARG3, 3);
1809
1810      __ cmpwi(CCR0, R5_ARG3, 3);
1811      __ ble(CCR0, l_5); // copy 1 at a time if less than 4 elements remain
1812
1813      __ srdi(tmp1, R5_ARG3, 2);
1814      __ andi(R5_ARG3, R5_ARG3, 3);
1815      __ mtctr(tmp1);
1816
1817      __ bind(l_4);
1818      // Use unrolled version for mass copying (copy 4 elements at a time).
1819      // Load feeding store gets zero latency on Power6, however not on Power5.
1820      // Therefore, the following sequence is made for the good of both.
1821      __ addi(R3_ARG1, R3_ARG1, -32);
1822      __ addi(R4_ARG2, R4_ARG2, -32);
1823      __ ld(tmp4, 24, R3_ARG1);
1824      __ ld(tmp3, 16, R3_ARG1);
1825      __ ld(tmp2, 8, R3_ARG1);
1826      __ ld(tmp1, 0, R3_ARG1);
1827      __ std(tmp4, 24, R4_ARG2);
1828      __ std(tmp3, 16, R4_ARG2);
1829      __ std(tmp2, 8, R4_ARG2);
1830      __ std(tmp1, 0, R4_ARG2);
1831      __ bdnz(l_4);
1832
1833      __ cmpwi(CCR0, R5_ARG3, 0);
1834      __ beq(CCR0, l_1);
1835
1836      __ bind(l_5);
1837      __ mtctr(R5_ARG3);
1838      __ bind(l_3);
1839      __ ld(R0, -8, R3_ARG1);
1840      __ std(R0, -8, R4_ARG2);
1841      __ addi(R3_ARG1, R3_ARG1, -8);
1842      __ addi(R4_ARG2, R4_ARG2, -8);
1843      __ bdnz(l_3);
1844
1845    }
1846    __ bind(l_1);
1847  }
1848
1849  // Generate stub for conjoint long copy.  If "aligned" is true, the
1850  // "from" and "to" addresses are assumed to be heapword aligned.
1851  //
1852  // Arguments for generated stub:
1853  //      from:  R3_ARG1
1854  //      to:    R4_ARG2
1855  //      count: R5_ARG3 treated as signed
1856  //
1857  address generate_conjoint_long_copy(bool aligned, const char * name) {
1858    StubCodeMark mark(this, "StubRoutines", name);
1859    address start = __ function_entry();
1860    assert_positive_int(R5_ARG3);
1861    address nooverlap_target = aligned ?
1862      STUB_ENTRY(arrayof_jlong_disjoint_arraycopy) :
1863      STUB_ENTRY(jlong_disjoint_arraycopy);
1864
1865    array_overlap_test(nooverlap_target, 3);
1866    generate_conjoint_long_copy_core(aligned);
1867
1868    __ li(R3_RET, 0); // return 0
1869    __ blr();
1870
1871    return start;
1872  }
1873
1874  // Generate stub for conjoint oop copy.  If "aligned" is true, the
1875  // "from" and "to" addresses are assumed to be heapword aligned.
1876  //
1877  // Arguments for generated stub:
1878  //      from:  R3_ARG1
1879  //      to:    R4_ARG2
1880  //      count: R5_ARG3 treated as signed
1881  //      dest_uninitialized: G1 support
1882  //
1883  address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1884    StubCodeMark mark(this, "StubRoutines", name);
1885
1886    address start = __ function_entry();
1887    assert_positive_int(R5_ARG3);
1888    address nooverlap_target = aligned ?
1889      STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
1890      STUB_ENTRY(oop_disjoint_arraycopy);
1891
1892    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1893
1894    // Save arguments.
1895    __ mr(R9_ARG7, R4_ARG2);
1896    __ mr(R10_ARG8, R5_ARG3);
1897
1898    if (UseCompressedOops) {
1899      array_overlap_test(nooverlap_target, 2);
1900      generate_conjoint_int_copy_core(aligned);
1901    } else {
1902      array_overlap_test(nooverlap_target, 3);
1903      generate_conjoint_long_copy_core(aligned);
1904    }
1905
1906    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
1907    __ li(R3_RET, 0); // return 0
1908    __ blr();
1909    return start;
1910  }
1911
1912  // Generate stub for disjoint oop copy.  If "aligned" is true, the
1913  // "from" and "to" addresses are assumed to be heapword aligned.
1914  //
1915  // Arguments for generated stub:
1916  //      from:  R3_ARG1
1917  //      to:    R4_ARG2
1918  //      count: R5_ARG3 treated as signed
1919  //      dest_uninitialized: G1 support
1920  //
1921  address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
1922    StubCodeMark mark(this, "StubRoutines", name);
1923    address start = __ function_entry();
1924    assert_positive_int(R5_ARG3);
1925    gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
1926
1927    // save some arguments, disjoint_long_copy_core destroys them.
1928    // needed for post barrier
1929    __ mr(R9_ARG7, R4_ARG2);
1930    __ mr(R10_ARG8, R5_ARG3);
1931
1932    if (UseCompressedOops) {
1933      generate_disjoint_int_copy_core(aligned);
1934    } else {
1935      generate_disjoint_long_copy_core(aligned);
1936    }
1937
1938    gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
1939    __ li(R3_RET, 0); // return 0
1940    __ blr();
1941
1942    return start;
1943  }
1944
1945
1946  // Helper for generating a dynamic type check.
1947  // Smashes only the given temp registers.
1948  void generate_type_check(Register sub_klass,
1949                           Register super_check_offset,
1950                           Register super_klass,
1951                           Register temp,
1952                           Label& L_success) {
1953    assert_different_registers(sub_klass, super_check_offset, super_klass);
1954
1955    BLOCK_COMMENT("type_check:");
1956
1957    Label L_miss;
1958
1959    __ check_klass_subtype_fast_path(sub_klass, super_klass, temp, R0, &L_success, &L_miss, NULL,
1960                                     super_check_offset);
1961    __ check_klass_subtype_slow_path(sub_klass, super_klass, temp, R0, &L_success, NULL);
1962
1963    // Fall through on failure!
1964    __ bind(L_miss);
1965  }
1966
1967
1968  //  Generate stub for checked oop copy.
1969  //
1970  // Arguments for generated stub:
1971  //      from:  R3
1972  //      to:    R4
1973  //      count: R5 treated as signed
1974  //      ckoff: R6 (super_check_offset)
1975  //      ckval: R7 (super_klass)
1976  //      ret:   R3 zero for success; (-1^K) where K is partial transfer count
1977  //
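  // Editorial note on the return convention: "(-1^K)" is the bitwise complement
  // ~K (i.e. -K - 1), not an exponentiation.  For example, if 3 elements were
  // copied before a type check failed, the stub returns ~3 == -4 and the caller
  // can recover K as ~R3_RET.  A zero return means every element was copied.
  //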
1978  address generate_checkcast_copy(const char *name, bool dest_uninitialized) {
1979
1980    const Register R3_from   = R3_ARG1;      // source array address
1981    const Register R4_to     = R4_ARG2;      // destination array address
1982    const Register R5_count  = R5_ARG3;      // elements count
1983    const Register R6_ckoff  = R6_ARG4;      // super_check_offset
1984    const Register R7_ckval  = R7_ARG5;      // super_klass
1985
1986    const Register R8_offset = R8_ARG6;      // loop var, with stride wordSize
1987    const Register R9_remain = R9_ARG7;      // loop var, with stride -1
1988    const Register R10_oop   = R10_ARG8;     // actual oop copied
1989    const Register R11_klass = R11_scratch1; // oop._klass
1990    const Register R12_tmp   = R12_scratch2;
1991
1992    const Register R2_minus1 = R2;
1993
1994    //__ align(CodeEntryAlignment);
1995    StubCodeMark mark(this, "StubRoutines", name);
1996    address start = __ function_entry();
1997
1998    // Assert that int is 64 bit sign extended and arrays are not conjoint.
1999#ifdef ASSERT
2000    {
2001    assert_positive_int(R5_ARG3);
2002    const Register tmp1 = R11_scratch1, tmp2 = R12_scratch2;
2003    Label no_overlap;
2004    __ subf(tmp1, R3_ARG1, R4_ARG2); // distance in bytes
2005    __ sldi(tmp2, R5_ARG3, LogBytesPerHeapOop); // size in bytes
2006    __ cmpld(CCR0, R3_ARG1, R4_ARG2); // Use unsigned comparison!
2007    __ cmpld(CCR1, tmp1, tmp2);
2008    __ crnand(CCR0, Assembler::less, CCR1, Assembler::less);
2009    // Overlaps if Src before dst and distance smaller than size.
2010    // Branch to forward copy routine otherwise.
2011    __ blt(CCR0, no_overlap);
2012    __ stop("overlap in checkcast_copy", 0x9543);
2013    __ bind(no_overlap);
2014    }
2015#endif
2016
2017    gen_write_ref_array_pre_barrier(R3_from, R4_to, R5_count, dest_uninitialized, R12_tmp, /* preserve: */ R6_ckoff, R7_ckval);
2018
2019    //inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
2020
2021    Label load_element, store_element, store_null, success, do_card_marks;
2022    __ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
2023    __ li(R8_offset, 0);                   // Offset from start of arrays.
2024    __ li(R2_minus1, -1);
2025    __ bne(CCR0, load_element);
2026
2027    // Empty array: Nothing to do.
2028    __ li(R3_RET, 0);           // Return 0 on (trivial) success.
2029    __ blr();
2030
2031    // ======== begin loop ========
2032    // (Entry is load_element.)
2033    __ align(OptoLoopAlignment);
2034    __ bind(store_element);
2035    if (UseCompressedOops) {
2036      __ encode_heap_oop_not_null(R10_oop);
2037      __ bind(store_null);
2038      __ stw(R10_oop, R8_offset, R4_to);
2039    } else {
2040      __ bind(store_null);
2041      __ std(R10_oop, R8_offset, R4_to);
2042    }
2043
2044    __ addi(R8_offset, R8_offset, heapOopSize);   // Step to next offset.
2045    __ add_(R9_remain, R2_minus1, R9_remain);     // Decrement the count.
2046    __ beq(CCR0, success);
2047
2048    // ======== loop entry is here ========
2049    __ bind(load_element);
2050    __ load_heap_oop(R10_oop, R8_offset, R3_from, &store_null);  // Load the oop.
2051
2052    __ load_klass(R11_klass, R10_oop); // Query the object klass.
2053
2054    generate_type_check(R11_klass, R6_ckoff, R7_ckval, R12_tmp,
2055                        // Branch to this on success:
2056                        store_element);
2057    // ======== end loop ========
2058
2059    // It was a real error; we must depend on the caller to finish the job.
2060    // Register R9_remain has number of *remaining* oops, R5_count number of *total* oops.
2061    // Emit GC store barriers for the oops we have copied (R5_count minus R9_remain),
2062    // and report their number to the caller.
2063    __ subf_(R5_count, R9_remain, R5_count);
2064    __ nand(R3_RET, R5_count, R5_count);   // report (-1^K) to caller
2065    __ bne(CCR0, do_card_marks);
2066    __ blr();
2067
2068    __ bind(success);
2069    __ li(R3_RET, 0);
2070
2071    __ bind(do_card_marks);
2072    // Store check on R4_to[0..R5_count-1].
2073    gen_write_ref_array_post_barrier(R4_to, R5_count, R12_tmp, /* preserve: */ R3_RET);
2074    __ blr();
2075    return start;
2076  }
2077
2078
2079  //  Generate 'unsafe' array copy stub.
2080  //  Though just as safe as the other stubs, it takes an unscaled
2081  //  size_t argument instead of an element count.
2082  //
2083  // Arguments for generated stub:
2084  //      from:  R3
2085  //      to:    R4
2086  //      count: R5 byte count, treated as ssize_t, can be zero
2087  //
2088  // Examines the alignment of the operands and dispatches
2089  // to a long, int, short, or byte copy loop.
2090  //
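  // Editorial sketch of the dispatch (illustration only; the stub below jumps
  // to the pre-generated copy stubs instead of calling functions):
  //
  //   size_t bits = (uintptr_t)from | (uintptr_t)to | (size_t)byte_count;
  //   if      ((bits & (BytesPerLong  - 1)) == 0) long_copy (from, to, byte_count >> LogBytesPerLong);
  //   else if ((bits & (BytesPerInt   - 1)) == 0) int_copy  (from, to, byte_count >> LogBytesPerInt);
  //   else if ((bits & (BytesPerShort - 1)) == 0) short_copy(from, to, byte_count >> LogBytesPerShort);
  //   else                                        byte_copy (from, to, byte_count);
  //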
2091  address generate_unsafe_copy(const char* name,
2092                               address byte_copy_entry,
2093                               address short_copy_entry,
2094                               address int_copy_entry,
2095                               address long_copy_entry) {
2096
2097    const Register R3_from   = R3_ARG1;      // source array address
2098    const Register R4_to     = R4_ARG2;      // destination array address
2099    const Register R5_count  = R5_ARG3;      // byte count (64-bit), scaled to an element count before dispatch
2100
2101    const Register R6_bits   = R6_ARG4;      // test copy of low bits
2102    const Register R7_tmp    = R7_ARG5;
2103
2104    //__ align(CodeEntryAlignment);
2105    StubCodeMark mark(this, "StubRoutines", name);
2106    address start = __ function_entry();
2107
2108    // Bump this on entry, not on exit:
2109    //inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr, R6_bits, R7_tmp);
2110
2111    Label short_copy, int_copy, long_copy;
2112
2113    __ orr(R6_bits, R3_from, R4_to);
2114    __ orr(R6_bits, R6_bits, R5_count);
2115    __ andi_(R0, R6_bits, (BytesPerLong-1));
2116    __ beq(CCR0, long_copy);
2117
2118    __ andi_(R0, R6_bits, (BytesPerInt-1));
2119    __ beq(CCR0, int_copy);
2120
2121    __ andi_(R0, R6_bits, (BytesPerShort-1));
2122    __ beq(CCR0, short_copy);
2123
2124    // byte_copy:
2125    __ b(byte_copy_entry);
2126
2127    __ bind(short_copy);
2128    __ srwi(R5_count, R5_count, LogBytesPerShort);
2129    __ b(short_copy_entry);
2130
2131    __ bind(int_copy);
2132    __ srwi(R5_count, R5_count, LogBytesPerInt);
2133    __ b(int_copy_entry);
2134
2135    __ bind(long_copy);
2136    __ srwi(R5_count, R5_count, LogBytesPerLong);
2137    __ b(long_copy_entry);
2138
2139    return start;
2140  }
2141
2142
2143  // Perform range checks on the proposed arraycopy.
2144  // Kills the two temps, but nothing else.
2145  // Also, clean the sign bits of src_pos and dst_pos.
2146  void arraycopy_range_checks(Register src,     // source array oop
2147                              Register src_pos, // source position
2148                              Register dst,     // destination array oop
2149                              Register dst_pos, // destination position
2150                              Register length,  // length of copy
2151                              Register temp1, Register temp2,
2152                              Label& L_failed) {
2153    BLOCK_COMMENT("arraycopy_range_checks:");
2154
2155    const Register array_length = temp1;  // scratch
2156    const Register end_pos      = temp2;  // scratch
2157
2158    //  if (src_pos + length > arrayOop(src)->length() ) FAIL;
2159    __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), src);
2160    __ add(end_pos, src_pos, length);  // src_pos + length
2161    __ cmpd(CCR0, end_pos, array_length);
2162    __ bgt(CCR0, L_failed);
2163
2164    //  if (dst_pos + length > arrayOop(dst)->length() ) FAIL;
2165    __ lwa(array_length, arrayOopDesc::length_offset_in_bytes(), dst);
2166    __ add(end_pos, dst_pos, length);  // dst_pos + length
2167    __ cmpd(CCR0, end_pos, array_length);
2168    __ bgt(CCR0, L_failed);
2169
2170    BLOCK_COMMENT("arraycopy_range_checks done");
2171  }
2172
2173
2174  //
2175  //  Generate generic array copy stubs
2176  //
2177  //  Input:
2178  //    R3    -  src oop
2179  //    R4    -  src_pos
2180  //    R5    -  dst oop
2181  //    R6    -  dst_pos
2182  //    R7    -  element count
2183  //
2184  //  Output:
2185  //    R3 ==  0  -  success
2186  //    R3 == -1  -  need to call System.arraycopy
2187  //
2188  address generate_generic_copy(const char *name,
2189                                address entry_jbyte_arraycopy,
2190                                address entry_jshort_arraycopy,
2191                                address entry_jint_arraycopy,
2192                                address entry_oop_arraycopy,
2193                                address entry_disjoint_oop_arraycopy,
2194                                address entry_jlong_arraycopy,
2195                                address entry_checkcast_arraycopy) {
2196    Label L_failed, L_objArray;
2197
2198    // Input registers
2199    const Register src       = R3_ARG1;  // source array oop
2200    const Register src_pos   = R4_ARG2;  // source position
2201    const Register dst       = R5_ARG3;  // destination array oop
2202    const Register dst_pos   = R6_ARG4;  // destination position
2203    const Register length    = R7_ARG5;  // elements count
2204
2205    // registers used as temp
2206    const Register src_klass = R8_ARG6;  // source array klass
2207    const Register dst_klass = R9_ARG7;  // destination array klass
2208    const Register lh        = R10_ARG8; // layout helper
2209    const Register temp      = R2;
2210
2211    //__ align(CodeEntryAlignment);
2212    StubCodeMark mark(this, "StubRoutines", name);
2213    address start = __ function_entry();
2214
2215    // Bump this on entry, not on exit:
2216    //inc_counter_np(SharedRuntime::_generic_array_copy_ctr, lh, temp);
2217
2218    // In principle, the int arguments could be dirty.
2219
2220    //-----------------------------------------------------------------------
2221    // Assembler stubs will be used for this call to arraycopy
2222    // if the following conditions are met:
2223    //
2224    // (1) src and dst must not be null.
2225    // (2) src_pos must not be negative.
2226    // (3) dst_pos must not be negative.
2227    // (4) length  must not be negative.
2228    // (5) src klass and dst klass should be the same and not NULL.
2229    // (6) src and dst should be arrays.
2230    // (7) src_pos + length must not exceed length of src.
2231    // (8) dst_pos + length must not exceed length of dst.
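    //
    // Editorial sketch of checks (1)-(4) as plain C (illustration only; the stub
    // folds them into condition-register logic below):
    //
    //   if (src == NULL || dst == NULL)                return -1;
    //   if (src_pos < 0 || dst_pos < 0 || length < 0)  return -1;
    //   // (5)-(8) are checked further down via the klass pointers,
    //   // the layout helper and arraycopy_range_checks().
    //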
2232    BLOCK_COMMENT("arraycopy initial argument checks");
2233
2234    __ cmpdi(CCR1, src, 0);      // if (src == NULL) return -1;
2235    __ extsw_(src_pos, src_pos); // if (src_pos < 0) return -1;
2236    __ cmpdi(CCR5, dst, 0);      // if (dst == NULL) return -1;
2237    __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2238    __ extsw_(dst_pos, dst_pos); // if (dst_pos < 0) return -1;
2239    __ cror(CCR5, Assembler::equal, CCR0, Assembler::less);
2240    __ extsw_(length, length);   // if (length < 0) return -1;
2241    __ cror(CCR1, Assembler::equal, CCR5, Assembler::equal);
2242    __ cror(CCR1, Assembler::equal, CCR0, Assembler::less);
2243    __ beq(CCR1, L_failed);
2244
2245    BLOCK_COMMENT("arraycopy argument klass checks");
2246    __ load_klass(src_klass, src);
2247    __ load_klass(dst_klass, dst);
2248
2249    // Load layout helper
2250    //
2251    //  |array_tag|     | header_size | element_type |     |log2_element_size|
2252    // 32        30    24            16              8     2                 0
2253    //
2254    //   array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
2255    //
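    //
    // Editorial sketch of how the fields are extracted further down
    // (illustration only, mirroring the rldicl/andi sequence below):
    //
    //   int header_size       = (lh >> Klass::_lh_header_size_shift) & Klass::_lh_header_size_mask;
    //   int log2_element_size =  lh & Klass::_lh_log2_element_size_mask;
    //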
2256
2257    int lh_offset = in_bytes(Klass::layout_helper_offset());
2258
2259    // Load the 32-bit signed layout helper value.
2260    __ lwz(lh, lh_offset, src_klass);
2261
2262    // Handle objArrays completely differently...
2263    jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
2264    __ load_const_optimized(temp, objArray_lh, R0);
2265    __ cmpw(CCR0, lh, temp);
2266    __ beq(CCR0, L_objArray);
2267
2268    __ cmpd(CCR5, src_klass, dst_klass);          // if (src->klass() != dst->klass()) return -1;
2269    __ cmpwi(CCR6, lh, Klass::_lh_neutral_value); // if (!src->is_Array()) return -1;
2270
2271    __ crnand(CCR5, Assembler::equal, CCR6, Assembler::less);
2272    __ beq(CCR5, L_failed);
2273
2274    // At this point, it is known to be a typeArray (array_tag 0x3).
2275#ifdef ASSERT
2276    { Label L;
2277      jint lh_prim_tag_in_place = (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift);
2278      __ load_const_optimized(temp, lh_prim_tag_in_place, R0);
2279      __ cmpw(CCR0, lh, temp);
2280      __ bge(CCR0, L);
2281      __ stop("must be a primitive array");
2282      __ bind(L);
2283    }
2284#endif
2285
2286    arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2287                           temp, dst_klass, L_failed);
2288
2289    // TypeArrayKlass
2290    //
2291    // src_addr = (src + array_header_in_bytes()) + (src_pos << log2elemsize);
2292    // dst_addr = (dst + array_header_in_bytes()) + (dst_pos << log2elemsize);
2293    //
2294
2295    const Register offset = dst_klass;    // array offset
2296    const Register elsize = src_klass;    // log2 element size
2297
2298    __ rldicl(offset, lh, 64 - Klass::_lh_header_size_shift, 64 - exact_log2(Klass::_lh_header_size_mask + 1));
2299    __ andi(elsize, lh, Klass::_lh_log2_element_size_mask);
2300    __ add(src, offset, src);       // src array offset
2301    __ add(dst, offset, dst);       // dst array offset
2302
2303    // The following registers must be set before the jump to the corresponding stub.
2304    const Register from     = R3_ARG1;  // source array address
2305    const Register to       = R4_ARG2;  // destination array address
2306    const Register count    = R5_ARG3;  // elements count
2307
2308    // 'from', 'to', 'count' registers should be set in this order
2309    // since they are the same as 'src', 'src_pos', 'dst'.
2310
2311    BLOCK_COMMENT("scale indexes to element size");
2312    __ sld(src_pos, src_pos, elsize);
2313    __ sld(dst_pos, dst_pos, elsize);
2314    __ add(from, src_pos, src);  // src_addr
2315    __ add(to, dst_pos, dst);    // dst_addr
2316    __ mr(count, length);        // length
2317
2318    BLOCK_COMMENT("choose copy loop based on element size");
2319    // Using conditional branches with range 32kB.
2320    const int bo = Assembler::bcondCRbiIs1, bi = Assembler::bi0(CCR0, Assembler::equal);
2321    __ cmpwi(CCR0, elsize, 0);
2322    __ bc(bo, bi, entry_jbyte_arraycopy);
2323    __ cmpwi(CCR0, elsize, LogBytesPerShort);
2324    __ bc(bo, bi, entry_jshort_arraycopy);
2325    __ cmpwi(CCR0, elsize, LogBytesPerInt);
2326    __ bc(bo, bi, entry_jint_arraycopy);
2327#ifdef ASSERT
2328    { Label L;
2329      __ cmpwi(CCR0, elsize, LogBytesPerLong);
2330      __ beq(CCR0, L);
2331      __ stop("must be long copy, but elsize is wrong");
2332      __ bind(L);
2333    }
2334#endif
2335    __ b(entry_jlong_arraycopy);
2336
2337    // ObjArrayKlass
2338  __ bind(L_objArray);
2339    // live at this point:  src_klass, dst_klass, src[_pos], dst[_pos], length
2340
2341    Label L_disjoint_plain_copy, L_checkcast_copy;
2342    //  test array classes for subtyping
2343    __ cmpd(CCR0, src_klass, dst_klass);         // usual case is exact equality
2344    __ bne(CCR0, L_checkcast_copy);
2345
2346    // Identically typed arrays can be copied without element-wise checks.
2347    arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2348                           temp, lh, L_failed);
2349
2350    __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2351    __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2352    __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2353    __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2354    __ add(from, src_pos, src);  // src_addr
2355    __ add(to, dst_pos, dst);    // dst_addr
2356    __ mr(count, length);        // length
2357    __ b(entry_oop_arraycopy);
2358
2359  __ bind(L_checkcast_copy);
2360    // live at this point:  src_klass, dst_klass
2361    {
2362      // Before looking at dst.length, make sure dst is also an objArray.
2363      __ lwz(temp, lh_offset, dst_klass);
2364      __ cmpw(CCR0, lh, temp);
2365      __ bne(CCR0, L_failed);
2366
2367      // It is safe to examine both src.length and dst.length.
2368      arraycopy_range_checks(src, src_pos, dst, dst_pos, length,
2369                             temp, lh, L_failed);
2370
2371      // Marshal the base address arguments now, freeing registers.
2372      __ addi(src, src, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //src offset
2373      __ addi(dst, dst, arrayOopDesc::base_offset_in_bytes(T_OBJECT)); //dst offset
2374      __ sldi(src_pos, src_pos, LogBytesPerHeapOop);
2375      __ sldi(dst_pos, dst_pos, LogBytesPerHeapOop);
2376      __ add(from, src_pos, src);  // src_addr
2377      __ add(to, dst_pos, dst);    // dst_addr
2378      __ mr(count, length);        // length
2379
2380      Register sco_temp = R6_ARG4;             // This register is free now.
2381      assert_different_registers(from, to, count, sco_temp,
2382                                 dst_klass, src_klass);
2383
2384      // Generate the type check.
2385      int sco_offset = in_bytes(Klass::super_check_offset_offset());
2386      __ lwz(sco_temp, sco_offset, dst_klass);
2387      generate_type_check(src_klass, sco_temp, dst_klass,
2388                          temp, L_disjoint_plain_copy);
2389
2390      // Fetch destination element klass from the ObjArrayKlass header.
2391      int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
2392
2393      // The checkcast_copy loop needs two extra arguments:
2394      __ ld(R7_ARG5, ek_offset, dst_klass);   // dest elem klass
2395      __ lwz(R6_ARG4, sco_offset, R7_ARG5);   // sco of elem klass
2396      __ b(entry_checkcast_arraycopy);
2397    }
2398
2399    __ bind(L_disjoint_plain_copy);
2400    __ b(entry_disjoint_oop_arraycopy);
2401
2402  __ bind(L_failed);
2403    __ li(R3_RET, -1); // return -1
2404    __ blr();
2405    return start;
2406  }
2407
2408  // Arguments for generated stub (little endian only):
2409  //   R3_ARG1   - source byte array address
2410  //   R4_ARG2   - destination byte array address
2411  //   R5_ARG3   - round key array
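  //
  // Editorial note: the round key array is the expanded AES key schedule as
  // produced by the Java side; its length in ints (44, 52 or 60) identifies
  // AES-128, AES-192 or AES-256 and selects 10, 12 or 14 cipher rounds below.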
2412  address generate_aescrypt_encryptBlock() {
2413    assert(UseAES, "need AES instructions");
2414    StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
2415
2416    address start = __ function_entry();
2417
2418    Label L_doLast;
2419
2420    Register from           = R3_ARG1;  // source array address
2421    Register to             = R4_ARG2;  // destination array address
2422    Register key            = R5_ARG3;  // round key array
2423
2424    Register keylen         = R8;
2425    Register temp           = R9;
2426    Register keypos         = R10;
2427    Register hex            = R11;
2428    Register fifteen        = R12;
2429
2430    VectorRegister vRet     = VR0;
2431
2432    VectorRegister vKey1    = VR1;
2433    VectorRegister vKey2    = VR2;
2434    VectorRegister vKey3    = VR3;
2435    VectorRegister vKey4    = VR4;
2436
2437    VectorRegister fromPerm = VR5;
2438    VectorRegister keyPerm  = VR6;
2439    VectorRegister toPerm   = VR7;
2440    VectorRegister fSplt    = VR8;
2441
2442    VectorRegister vTmp1    = VR9;
2443    VectorRegister vTmp2    = VR10;
2444    VectorRegister vTmp3    = VR11;
2445    VectorRegister vTmp4    = VR12;
2446
2447    VectorRegister vLow     = VR13;
2448    VectorRegister vHigh    = VR14;
2449
2450    __ li              (hex, 16);
2451    __ li              (fifteen, 15);
2452    __ vspltisb        (fSplt, 0x0f);
2453
2454    // load unaligned from[0-15] to vRet
2455    __ lvx             (vRet, from);
2456    __ lvx             (vTmp1, fifteen, from);
2457    __ lvsl            (fromPerm, from);
2458    __ vxor            (fromPerm, fromPerm, fSplt);
2459    __ vperm           (vRet, vRet, vTmp1, fromPerm);
2460
2461    // load keylen (44 or 52 or 60)
2462    __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2463
2464    // compute the permutation vector (keyPerm) used to load the round keys
2465    __ lvsr            (keyPerm, key);
2466    __ vxor            (vTmp2, vTmp2, vTmp2);
2467    __ vspltisb        (vTmp2, -16);
2468    __ vrld            (keyPerm, keyPerm, vTmp2);
2469    __ vrld            (keyPerm, keyPerm, vTmp2);
2470    __ vsldoi          (keyPerm, keyPerm, keyPerm, -8);
2471
2472    // load the 1st round key to vKey1
2473    __ li              (keypos, 0);
2474    __ lvx             (vKey1, keypos, key);
2475    __ addi            (keypos, keypos, 16);
2476    __ lvx             (vTmp1, keypos, key);
2477    __ vperm           (vKey1, vTmp1, vKey1, keyPerm);
2478
2479    // 1st round
2480    __ vxor (vRet, vRet, vKey1);
2481
2482    // load the 2nd round key to vKey1
2483    __ addi            (keypos, keypos, 16);
2484    __ lvx             (vTmp2, keypos, key);
2485    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2486
2487    // load the 3rd round key to vKey2
2488    __ addi            (keypos, keypos, 16);
2489    __ lvx             (vTmp1, keypos, key);
2490    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2491
2492    // load the 4th round key to vKey3
2493    __ addi            (keypos, keypos, 16);
2494    __ lvx             (vTmp2, keypos, key);
2495    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
2496
2497    // load the 5th round key to vKey4
2498    __ addi            (keypos, keypos, 16);
2499    __ lvx             (vTmp1, keypos, key);
2500    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
2501
2502    // 2nd - 5th rounds
2503    __ vcipher (vRet, vRet, vKey1);
2504    __ vcipher (vRet, vRet, vKey2);
2505    __ vcipher (vRet, vRet, vKey3);
2506    __ vcipher (vRet, vRet, vKey4);
2507
2508    // load the 6th round key to vKey1
2509    __ addi            (keypos, keypos, 16);
2510    __ lvx             (vTmp2, keypos, key);
2511    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2512
2513    // load the 7th round key to vKey2
2514    __ addi            (keypos, keypos, 16);
2515    __ lvx             (vTmp1, keypos, key);
2516    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2517
2518    // load the 8th round key to vKey3
2519    __ addi            (keypos, keypos, 16);
2520    __ lvx             (vTmp2, keypos, key);
2521    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
2522
2523    // load the 9th round key to vKey4
2524    __ addi            (keypos, keypos, 16);
2525    __ lvx             (vTmp1, keypos, key);
2526    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
2527
2528    // 6th - 9th rounds
2529    __ vcipher (vRet, vRet, vKey1);
2530    __ vcipher (vRet, vRet, vKey2);
2531    __ vcipher (vRet, vRet, vKey3);
2532    __ vcipher (vRet, vRet, vKey4);
2533
2534    // load the 10th round key to vKey1
2535    __ addi            (keypos, keypos, 16);
2536    __ lvx             (vTmp2, keypos, key);
2537    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2538
2539    // load the 11th round key to vKey2
2540    __ addi            (keypos, keypos, 16);
2541    __ lvx             (vTmp1, keypos, key);
2542    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2543
2544    // if all round keys are loaded, skip next 4 rounds
2545    __ cmpwi           (CCR0, keylen, 44);
2546    __ beq             (CCR0, L_doLast);
2547
2548    // 10th - 11th rounds
2549    __ vcipher (vRet, vRet, vKey1);
2550    __ vcipher (vRet, vRet, vKey2);
2551
2552    // load the 12th round key to vKey1
2553    __ addi            (keypos, keypos, 16);
2554    __ lvx             (vTmp2, keypos, key);
2555    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2556
2557    // load the 13th round key to vKey2
2558    __ addi            (keypos, keypos, 16);
2559    __ lvx             (vTmp1, keypos, key);
2560    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2561
2562    // if all round keys are loaded, skip next 2 rounds
2563    __ cmpwi           (CCR0, keylen, 52);
2564    __ beq             (CCR0, L_doLast);
2565
2566    // 12th - 13th rounds
2567    __ vcipher (vRet, vRet, vKey1);
2568    __ vcipher (vRet, vRet, vKey2);
2569
2570    // load the 14th round key to vKey1
2571    __ addi            (keypos, keypos, 16);
2572    __ lvx             (vTmp2, keypos, key);
2573    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2574
2575    // load the 15th round key to vKey2
2576    __ addi            (keypos, keypos, 16);
2577    __ lvx             (vTmp1, keypos, key);
2578    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2579
2580    __ bind(L_doLast);
2581
2582    // last two rounds
2583    __ vcipher (vRet, vRet, vKey1);
2584    __ vcipherlast (vRet, vRet, vKey2);
2585
2586    __ neg             (temp, to);
2587    __ lvsr            (toPerm, temp);
2588    __ vspltisb        (vTmp2, -1);
2589    __ vxor            (vTmp1, vTmp1, vTmp1);
2590    __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
2591    __ vxor            (toPerm, toPerm, fSplt);
2592    __ lvx             (vTmp1, to);
2593    __ vperm           (vRet, vRet, vRet, toPerm);
2594    __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
2595    __ lvx             (vTmp4, fifteen, to);
2596    __ stvx            (vTmp1, to);
2597    __ vsel            (vRet, vRet, vTmp4, vTmp2);
2598    __ stvx            (vRet, fifteen, to);
2599
2600    __ blr();
2601    return start;
2602  }
2603
2604  // Arguments for generated stub (little endian only):
2605  //   R3_ARG1   - source byte array address
2606  //   R4_ARG2   - destination byte array address
2607  //   R5_ARG3   - K (key) in little endian int array
2608  address generate_aescrypt_decryptBlock() {
2609    assert(UseAES, "need AES instructions");
2610    StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
2611
2612    address start = __ function_entry();
2613
2614    Label L_doLast;
2615    Label L_do44;
2616    Label L_do52;
2617    Label L_do60;
2618
2619    Register from           = R3_ARG1;  // source array address
2620    Register to             = R4_ARG2;  // destination array address
2621    Register key            = R5_ARG3;  // round key array
2622
2623    Register keylen         = R8;
2624    Register temp           = R9;
2625    Register keypos         = R10;
2626    Register hex            = R11;
2627    Register fifteen        = R12;
2628
2629    VectorRegister vRet     = VR0;
2630
2631    VectorRegister vKey1    = VR1;
2632    VectorRegister vKey2    = VR2;
2633    VectorRegister vKey3    = VR3;
2634    VectorRegister vKey4    = VR4;
2635    VectorRegister vKey5    = VR5;
2636
2637    VectorRegister fromPerm = VR6;
2638    VectorRegister keyPerm  = VR7;
2639    VectorRegister toPerm   = VR8;
2640    VectorRegister fSplt    = VR9;
2641
2642    VectorRegister vTmp1    = VR10;
2643    VectorRegister vTmp2    = VR11;
2644    VectorRegister vTmp3    = VR12;
2645    VectorRegister vTmp4    = VR13;
2646
2647    VectorRegister vLow     = VR14;
2648    VectorRegister vHigh    = VR15;
2649
2650    __ li              (hex, 16);
2651    __ li              (fifteen, 15);
2652    __ vspltisb        (fSplt, 0x0f);
2653
2654    // load unaligned from[0-15] to vRet
2655    __ lvx             (vRet, from);
2656    __ lvx             (vTmp1, fifteen, from);
2657    __ lvsl            (fromPerm, from);
2658    __ vxor            (fromPerm, fromPerm, fSplt);
2659    __ vperm           (vRet, vRet, vTmp1, fromPerm); // align [and byte swap in LE]
2660
2661    // load keylen (44 or 52 or 60)
2662    __ lwz             (keylen, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT), key);
2663
2664    // compute the permutation vector (keyPerm) used to load the round keys
2665    __ lvsr            (keyPerm, key);
2666    __ vxor            (vTmp2, vTmp2, vTmp2);
2667    __ vspltisb        (vTmp2, -16);
2668    __ vrld            (keyPerm, keyPerm, vTmp2);
2669    __ vrld            (keyPerm, keyPerm, vTmp2);
2670    __ vsldoi          (keyPerm, keyPerm, keyPerm, -8);
2671
2672    __ cmpwi           (CCR0, keylen, 44);
2673    __ beq             (CCR0, L_do44);
2674
2675    __ cmpwi           (CCR0, keylen, 52);
2676    __ beq             (CCR0, L_do52);
2677
2678    // load the 15th round key to vKey1
2679    __ li              (keypos, 240);
2680    __ lvx             (vTmp1, keypos, key);
2681    __ addi            (keypos, keypos, -16);
2682    __ lvx             (vTmp2, keypos, key);
2683    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2684
2685    // load the 14th round key to vKey2
2686    __ addi            (keypos, keypos, -16);
2687    __ lvx             (vTmp1, keypos, key);
2688    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
2689
2690    // load the 13th round key to vKey3
2691    __ addi            (keypos, keypos, -16);
2692    __ lvx             (vTmp2, keypos, key);
2693    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
2694
2695    // load the 12th round key to vKey4
2696    __ addi            (keypos, keypos, -16);
2697    __ lvx             (vTmp1, keypos, key);
2698    __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
2699
2700    // load the 11th round key to vKey5
2701    __ addi            (keypos, keypos, -16);
2702    __ lvx             (vTmp2, keypos, key);
2703    __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
2704
2705    // 1st - 5th rounds
2706    __ vxor            (vRet, vRet, vKey1);
2707    __ vncipher        (vRet, vRet, vKey2);
2708    __ vncipher        (vRet, vRet, vKey3);
2709    __ vncipher        (vRet, vRet, vKey4);
2710    __ vncipher        (vRet, vRet, vKey5);
2711
2712    __ b               (L_doLast);
2713
2714    __ bind            (L_do52);
2715
2716    // load the 13th round key to vKey1
2717    __ li              (keypos, 208);
2718    __ lvx             (vTmp1, keypos, key);
2719    __ addi            (keypos, keypos, -16);
2720    __ lvx             (vTmp2, keypos, key);
2721    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2722
2723    // load the 12th round key to vKey2
2724    __ addi            (keypos, keypos, -16);
2725    __ lvx             (vTmp1, keypos, key);
2726    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
2727
2728    // load the 11th round key to vKey3
2729    __ addi            (keypos, keypos, -16);
2730    __ lvx             (vTmp2, keypos, key);
2731    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
2732
2733    // 1st - 3rd rounds
2734    __ vxor            (vRet, vRet, vKey1);
2735    __ vncipher        (vRet, vRet, vKey2);
2736    __ vncipher        (vRet, vRet, vKey3);
2737
2738    __ b               (L_doLast);
2739
2740    __ bind            (L_do44);
2741
2742    // load the 11th round key to vKey1
2743    __ li              (keypos, 176);
2744    __ lvx             (vTmp1, keypos, key);
2745    __ addi            (keypos, keypos, -16);
2746    __ lvx             (vTmp2, keypos, key);
2747    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2748
2749    // 1st round
2750    __ vxor            (vRet, vRet, vKey1);
2751
2752    __ bind            (L_doLast);
2753
2754    // load the 10th round key to vKey1
2755    __ addi            (keypos, keypos, -16);
2756    __ lvx             (vTmp1, keypos, key);
2757    __ vperm           (vKey1, vTmp2, vTmp1, keyPerm);
2758
2759    // load the 9th round key to vKey2
2760    __ addi            (keypos, keypos, -16);
2761    __ lvx             (vTmp2, keypos, key);
2762    __ vperm           (vKey2, vTmp1, vTmp2, keyPerm);
2763
2764    // load the 8th round key to vKey3
2765    __ addi            (keypos, keypos, -16);
2766    __ lvx             (vTmp1, keypos, key);
2767    __ vperm           (vKey3, vTmp2, vTmp1, keyPerm);
2768
2769    // load the 7th round key to vKey4
2770    __ addi            (keypos, keypos, -16);
2771    __ lvx             (vTmp2, keypos, key);
2772    __ vperm           (vKey4, vTmp1, vTmp2, keyPerm);
2773
2774    // load the 6th round key to vKey5
2775    __ addi            (keypos, keypos, -16);
2776    __ lvx             (vTmp1, keypos, key);
2777    __ vperm           (vKey5, vTmp2, vTmp1, keyPerm);
2778
2779    // last 10th - 6th rounds
2780    __ vncipher        (vRet, vRet, vKey1);
2781    __ vncipher        (vRet, vRet, vKey2);
2782    __ vncipher        (vRet, vRet, vKey3);
2783    __ vncipher        (vRet, vRet, vKey4);
2784    __ vncipher        (vRet, vRet, vKey5);
2785
2786    // load the 5th round key to vKey1
2787    __ addi            (keypos, keypos, -16);
2788    __ lvx             (vTmp2, keypos, key);
2789    __ vperm           (vKey1, vTmp1, vTmp2, keyPerm);
2790
2791    // load the 4th round key to vKey2
2792    __ addi            (keypos, keypos, -16);
2793    __ lvx             (vTmp1, keypos, key);
2794    __ vperm           (vKey2, vTmp2, vTmp1, keyPerm);
2795
2796    // load the 3rd round key to vKey3
2797    __ addi            (keypos, keypos, -16);
2798    __ lvx             (vTmp2, keypos, key);
2799    __ vperm           (vKey3, vTmp1, vTmp2, keyPerm);
2800
2801    // load the 2nd round key to vKey4
2802    __ addi            (keypos, keypos, -16);
2803    __ lvx             (vTmp1, keypos, key);
2804    __ vperm           (vKey4, vTmp2, vTmp1, keyPerm);
2805
2806    // load the 1st round key to vKey5
2807    __ addi            (keypos, keypos, -16);
2808    __ lvx             (vTmp2, keypos, key);
2809    __ vperm           (vKey5, vTmp1, vTmp2, keyPerm);
2810
2811    // last 5th - 1st rounds
2812    __ vncipher        (vRet, vRet, vKey1);
2813    __ vncipher        (vRet, vRet, vKey2);
2814    __ vncipher        (vRet, vRet, vKey3);
2815    __ vncipher        (vRet, vRet, vKey4);
2816    __ vncipherlast    (vRet, vRet, vKey5);
2817
2818    __ neg             (temp, to);
2819    __ lvsr            (toPerm, temp);
2820    __ vspltisb        (vTmp2, -1);
2821    __ vxor            (vTmp1, vTmp1, vTmp1);
2822    __ vperm           (vTmp2, vTmp2, vTmp1, toPerm);
2823    __ vxor            (toPerm, toPerm, fSplt);
2824    __ lvx             (vTmp1, to);
2825    __ vperm           (vRet, vRet, vRet, toPerm);
2826    __ vsel            (vTmp1, vTmp1, vRet, vTmp2);
2827    __ lvx             (vTmp4, fifteen, to);
2828    __ stvx            (vTmp1, to);
2829    __ vsel            (vRet, vRet, vTmp4, vTmp2);
2830    __ stvx            (vRet, fifteen, to);
2831
2832    __ blr();
2833    return start;
2834  }
2835
2836  void generate_arraycopy_stubs() {
2837    // Note: the disjoint stubs must be generated first, some of
2838    // the conjoint stubs use them.
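    // (The conjoint stubs branch to the corresponding disjoint stub via
    // STUB_ENTRY(..._disjoint_arraycopy) when array_overlap_test shows that
    // source and destination do not actually overlap.)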
2839
2840    // non-aligned disjoint versions
2841    StubRoutines::_jbyte_disjoint_arraycopy       = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy");
2842    StubRoutines::_jshort_disjoint_arraycopy      = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
2843    StubRoutines::_jint_disjoint_arraycopy        = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy");
2844    StubRoutines::_jlong_disjoint_arraycopy       = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy");
2845    StubRoutines::_oop_disjoint_arraycopy         = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy", false);
2846    StubRoutines::_oop_disjoint_arraycopy_uninit  = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy_uninit", true);
2847
2848    // aligned disjoint versions
2849    StubRoutines::_arrayof_jbyte_disjoint_arraycopy      = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy");
2850    StubRoutines::_arrayof_jshort_disjoint_arraycopy     = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
2851    StubRoutines::_arrayof_jint_disjoint_arraycopy       = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy");
2852    StubRoutines::_arrayof_jlong_disjoint_arraycopy      = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy");
2853    StubRoutines::_arrayof_oop_disjoint_arraycopy        = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy", false);
2854    StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy_uninit", true);
2855
2856    // non-aligned conjoint versions
2857    StubRoutines::_jbyte_arraycopy      = generate_conjoint_byte_copy(false, "jbyte_arraycopy");
2858    StubRoutines::_jshort_arraycopy     = generate_conjoint_short_copy(false, "jshort_arraycopy");
2859    StubRoutines::_jint_arraycopy       = generate_conjoint_int_copy(false, "jint_arraycopy");
2860    StubRoutines::_jlong_arraycopy      = generate_conjoint_long_copy(false, "jlong_arraycopy");
2861    StubRoutines::_oop_arraycopy        = generate_conjoint_oop_copy(false, "oop_arraycopy", false);
2862    StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, "oop_arraycopy_uninit", true);
2863
2864    // aligned conjoint versions
2865    StubRoutines::_arrayof_jbyte_arraycopy      = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy");
2866    StubRoutines::_arrayof_jshort_arraycopy     = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
2867    StubRoutines::_arrayof_jint_arraycopy       = generate_conjoint_int_copy(true, "arrayof_jint_arraycopy");
2868    StubRoutines::_arrayof_jlong_arraycopy      = generate_conjoint_long_copy(true, "arrayof_jlong_arraycopy");
2869    StubRoutines::_arrayof_oop_arraycopy        = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy", false);
2870    StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, "arrayof_oop_arraycopy_uninit", true);
2871
    // special/generic versions
    StubRoutines::_checkcast_arraycopy        = generate_checkcast_copy("checkcast_arraycopy", false);
    StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", true);

    StubRoutines::_unsafe_arraycopy  = generate_unsafe_copy("unsafe_arraycopy",
                                                            STUB_ENTRY(jbyte_arraycopy),
                                                            STUB_ENTRY(jshort_arraycopy),
                                                            STUB_ENTRY(jint_arraycopy),
                                                            STUB_ENTRY(jlong_arraycopy));
    StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
                                                             STUB_ENTRY(jbyte_arraycopy),
                                                             STUB_ENTRY(jshort_arraycopy),
                                                             STUB_ENTRY(jint_arraycopy),
                                                             STUB_ENTRY(oop_arraycopy),
                                                             STUB_ENTRY(oop_disjoint_arraycopy),
                                                             STUB_ENTRY(jlong_arraycopy),
                                                             STUB_ENTRY(checkcast_arraycopy));
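    // The unsafe and generic stubs are handed the type-specific entries above
    // so that the code generated for them can dispatch at runtime (by element
    // size and, for the generic stub, after the array type checks) instead of
    // duplicating the copy loops.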

    // fill routines
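    // (These back C2's OptimizeFill optimization, which replaces simple
    //  array-fill loops with a call into one of the stubs below; whether a
    //  given loop qualifies is decided in the compiler, not here.)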
    if (OptimizeFill) {
      StubRoutines::_jbyte_fill          = generate_fill(T_BYTE,  false, "jbyte_fill");
      StubRoutines::_jshort_fill         = generate_fill(T_SHORT, false, "jshort_fill");
      StubRoutines::_jint_fill           = generate_fill(T_INT,   false, "jint_fill");
      StubRoutines::_arrayof_jbyte_fill  = generate_fill(T_BYTE,  true, "arrayof_jbyte_fill");
      StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
      StubRoutines::_arrayof_jint_fill   = generate_fill(T_INT,   true, "arrayof_jint_fill");
    }
  }

  // Safefetch stubs.
  void generate_safefetch(const char* name, int size, address* entry, address* fault_pc, address* continuation_pc) {
    // safefetch signatures:
    //   int      SafeFetch32(int*      adr, int      errValue);
    //   intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
    //
    // arguments:
    //   R3_ARG1 = adr
    //   R4_ARG2 = errValue
    //
    // result:
    //   R3_RET  = *adr or errValue
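    //
    // Usage sketch (illustrative only; the helper and sentinel below are made
    // up for documentation, not taken from this file). SafeFetch lets VM code
    // read memory that may be unmapped: if the load faults, the signal handler
    // resumes at *continuation_pc and the errValue (still in R4_ARG2) is
    // returned instead of crashing.
    //
    //   int probe_int(int* p) {
    //     const int kSentinel = -2;              // any value the caller can distinguish
    //     int v = SafeFetch32(p, kSentinel);     // never faults visibly
    //     return (v == kSentinel) ? 0 : v;       // caller decides what a miss means
    //   }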

    StubCodeMark mark(this, "StubRoutines", name);

    // Entry point, pc or function descriptor.
    *entry = __ function_entry();

    // Load *adr into R4_ARG2, may fault.
    *fault_pc = __ pc();
    switch (size) {
      case 4:
        // int32_t, sign-extended
        __ lwa(R4_ARG2, 0, R3_ARG1);
        break;
      case 8:
        // int64_t
        __ ld(R4_ARG2, 0, R3_ARG1);
        break;
      default:
        ShouldNotReachHere();
    }

    // return errValue or *adr
    *continuation_pc = __ pc();
    __ mr(R3_RET, R4_ARG2);
    __ blr();
  }

  // Stub for BigInteger::multiplyToLen()
  //
  //  Arguments:
  //
  //  Input:
  //    R3 - x address
  //    R4 - x length
  //    R5 - y address
  //    R6 - y length
  //    R7 - z address
  //    R8 - z length
  //
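  // What the stub computes -- a plain-C++ reference sketch for documentation
  // only (assumptions: x, y and z are BigInteger magnitudes, i.e. big-endian
  // arrays of 32-bit limbs, and z has room for xlen + ylen ints; the stub
  // itself delegates to MacroAssembler::multiply_to_len below):
  //
  //   void multiply_to_len_ref(const unsigned* x, int xlen,
  //                            const unsigned* y, int ylen, unsigned* z) {
  //     for (int i = 0; i < xlen + ylen; i++) z[i] = 0;
  //     for (int i = xlen - 1; i >= 0; i--) {            // schoolbook multiplication
  //       unsigned long long carry = 0;
  //       for (int j = ylen - 1; j >= 0; j--) {
  //         unsigned long long p = (unsigned long long)x[i] * y[j]
  //                              + z[i + j + 1] + carry;
  //         z[i + j + 1] = (unsigned)p;                  // low 32 bits
  //         carry = p >> 32;                             // high 32 bits
  //       }
  //       z[i] = (unsigned)carry;
  //     }
  //   }
  //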
  address generate_multiplyToLen() {

    StubCodeMark mark(this, "StubRoutines", "multiplyToLen");

    address start = __ function_entry();

    const Register x     = R3;
    const Register xlen  = R4;
    const Register y     = R5;
    const Register ylen  = R6;
    const Register z     = R7;
    const Register zlen  = R8;

    const Register tmp1  = R2; // TOC not used.
    const Register tmp2  = R9;
    const Register tmp3  = R10;
    const Register tmp4  = R11;
    const Register tmp5  = R12;

    // non-volatile regs
    const Register tmp6  = R31;
    const Register tmp7  = R30;
    const Register tmp8  = R29;
    const Register tmp9  = R28;
    const Register tmp10 = R27;
    const Register tmp11 = R26;
    const Register tmp12 = R25;
    const Register tmp13 = R24;

    BLOCK_COMMENT("Entry:");

    // C2 does not respect int to long conversion for stub calls.
    __ clrldi(xlen, xlen, 32);
    __ clrldi(ylen, ylen, 32);
    __ clrldi(zlen, zlen, 32);
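    // (clrldi(reg, reg, 32) clears the upper 32 bits of the register, i.e. it
    //  zero-extends the 32-bit length arguments mentioned above.)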

    // Save non-volatile regs (frameless).
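    // "Frameless" means no new stack frame is pushed: the registers are stored
    // at negative offsets below R1_SP. This presumes the area immediately below
    // the stack pointer stays untouched while multiply_to_len runs (the PPC64
    // ABIs reserve a protected zone below SP for this kind of leaf use --
    // stated here as an assumption, not something this file asserts).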
    int current_offs = 8;
    __ std(R24, -current_offs, R1_SP); current_offs += 8;
    __ std(R25, -current_offs, R1_SP); current_offs += 8;
    __ std(R26, -current_offs, R1_SP); current_offs += 8;
    __ std(R27, -current_offs, R1_SP); current_offs += 8;
    __ std(R28, -current_offs, R1_SP); current_offs += 8;
    __ std(R29, -current_offs, R1_SP); current_offs += 8;
    __ std(R30, -current_offs, R1_SP); current_offs += 8;
    __ std(R31, -current_offs, R1_SP);

    __ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5,
                       tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13);

    // Restore non-volatile regs.
    current_offs = 8;
    __ ld(R24, -current_offs, R1_SP); current_offs += 8;
    __ ld(R25, -current_offs, R1_SP); current_offs += 8;
    __ ld(R26, -current_offs, R1_SP); current_offs += 8;
    __ ld(R27, -current_offs, R1_SP); current_offs += 8;
    __ ld(R28, -current_offs, R1_SP); current_offs += 8;
    __ ld(R29, -current_offs, R1_SP); current_offs += 8;
    __ ld(R30, -current_offs, R1_SP); current_offs += 8;
    __ ld(R31, -current_offs, R1_SP);

    __ blr();  // Return to caller.

    return start;
  }

  /**
   * Arguments:
   *
   * Inputs:
   *   R3_ARG1    - int   crc
   *   R4_ARG2    - byte* buf
   *   R5_ARG3    - int   length (of buffer)
   *
   * scratch:
   *   R2, R6-R12
   *
   * Output:
   *   R3_RET     - int   crc result
   */
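  // For reference, the bytewise form of what "updateBytes" computes -- a
  // hedged documentation sketch only (assumptions: the standard reflected
  // CRC-32 used by java.util.zip with a 256-entry lookup table; the generated
  // kernel below processes a word per step and takes care of any pre-/post-
  // conditioning of the crc itself):
  //
  //   unsigned crc32_bytewise(unsigned crc, const unsigned char* buf, int len,
  //                           const unsigned table[256]) {
  //     for (int i = 0; i < len; i++) {
  //       crc = table[(crc ^ buf[i]) & 0xFF] ^ (crc >> 8);   // one byte per step
  //     }
  //     return crc;
  //   }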
  // Compute CRC32 function.
  address generate_CRC32_updateBytes(const char* name) {
    __ align(CodeEntryAlignment);
    StubCodeMark mark(this, "StubRoutines", name);
    address start = __ function_entry();  // Remember stub start address (is rtn value).

    // arguments to kernel_crc32:
    const Register crc     = R3_ARG1;  // Current checksum, preset by caller or result from previous call.
    const Register data    = R4_ARG2;  // source byte array
    const Register dataLen = R5_ARG3;  // #bytes to process
    const Register table   = R6_ARG4;  // crc table address

    const Register t0      = R2;
    const Register t1      = R7;
    const Register t2      = R8;
    const Register t3      = R9;
    const Register tc0     = R10;
    const Register tc1     = R11;
    const Register tc2     = R12;

    BLOCK_COMMENT("Stub body {");
    assert_different_registers(crc, data, dataLen, table);

    StubRoutines::ppc64::generate_load_crc_table_addr(_masm, table);

    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, tc0, tc1, tc2, table);

    BLOCK_COMMENT("return");
    __ mr_if_needed(R3_RET, crc);      // Updated crc is function result. No copying required (R3_ARG1 == R3_RET).
    __ blr();

    BLOCK_COMMENT("} Stub body");
    return start;
  }

  // Initialization
  void generate_initial() {
    // Generates all stubs and initializes the entry points

    // Entry points that exist on all platforms.
    // Note: This code could be shared among the different platforms, but the
    // benefit seems smaller than the cost of a much more complicated
    // generator structure. See also the comment in stubRoutines.hpp.

    StubRoutines::_forward_exception_entry          = generate_forward_exception();
    StubRoutines::_call_stub_entry                  = generate_call_stub(StubRoutines::_call_stub_return_address);
    StubRoutines::_catch_exception_entry            = generate_catch_exception();

    // Build this early so it's available for the interpreter.
    StubRoutines::_throw_StackOverflowError_entry   =
      generate_throw_exception("StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError), false);
    StubRoutines::_throw_delayed_StackOverflowError_entry =
      generate_throw_exception("delayed StackOverflowError throw_exception",
                               CAST_FROM_FN_PTR(address, SharedRuntime::throw_delayed_StackOverflowError), false);

    // CRC32 Intrinsics.
    if (UseCRC32Intrinsics) {
      StubRoutines::_crc_table_adr    = (address)StubRoutines::ppc64::_crc_table;
      StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
    }
  }

  void generate_all() {
    // Generates all stubs and initializes the entry points

    // These entry points require SharedInfo::stack0 to be set up in
    // non-core builds
    StubRoutines::_throw_AbstractMethodError_entry         = generate_throw_exception("AbstractMethodError throw_exception",          CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError),  false);
    // Handle IncompatibleClassChangeError in itable stubs.
    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError),  false);
    StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);

    // support for verify_oop (must happen after universe_init)
    StubRoutines::_verify_oop_subroutine_entry             = generate_verify_oop();

    // arraycopy stubs used by compilers
    generate_arraycopy_stubs();

    // Safefetch stubs.
    generate_safefetch("SafeFetch32", sizeof(int),     &StubRoutines::_safefetch32_entry,
                                                       &StubRoutines::_safefetch32_fault_pc,
                                                       &StubRoutines::_safefetch32_continuation_pc);
    generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
                                                       &StubRoutines::_safefetchN_fault_pc,
                                                       &StubRoutines::_safefetchN_continuation_pc);

#ifdef COMPILER2
    if (UseMultiplyToLenIntrinsic) {
      StubRoutines::_multiplyToLen = generate_multiplyToLen();
    }
#endif

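    // Unlike multiplyToLen above, the Montgomery intrinsics are not generated
    // as stubs here: the entries point directly at the C++ implementations in
    // SharedRuntime.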
    if (UseMontgomeryMultiplyIntrinsic) {
      StubRoutines::_montgomeryMultiply
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_multiply);
    }
    if (UseMontgomerySquareIntrinsic) {
      StubRoutines::_montgomerySquare
        = CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
    }

    if (UseAESIntrinsics) {
      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
    }

  }

 public:
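  // The 'all' flag selects the generation phase: generate_initial() runs early
  // (before most of the VM is up, see "Build this early" above), generate_all()
  // runs later, once e.g. the universe is initialized (see the verify_oop
  // comment). Both phases are driven from the StubRoutines initialization code.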
  StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
    // replace the standard masm with a special one:
    _masm = new MacroAssembler(code);
    if (all) {
      generate_all();
    } else {
      generate_initial();
    }
  }
};

void StubGenerator_generate(CodeBuffer* code, bool all) {
  StubGenerator g(code, all);
}
