c1_LIRAssembler.cpp revision 6856:5217fa82f1a4
/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/os.hpp"

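// Finish a patching site: pad with nops so a call can later be patched in,
// install the stub, and (in debug builds) verify that the bytecode at the
// patch site matches the kind of patch being emitted.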
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

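// Pick the patching stub kind for a resolved-constant load: bytecodes that
// carry an optional appendix use load_appendix_id, everything else patches
// a mirror load.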
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}

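// Bail out of the compilation if the current code section is about to
// overflow, so later emit steps never run off the end of the buffer.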
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}

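// Emit the out-of-line entry (adapter) code for every exception handler and
// record the PC offset at which each handler entry starts.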
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}

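// Top-level emission: print the LIR if requested, emit every block in
// code-emit order, then flush any pending debug information.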
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}

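// Run the platform peephole pass over the list, then emit each LIR op in
// order, interleaving block comments and non-safepoint debug info.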
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

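// Coalesce the debug state of consecutive ops into a single pending
// non-safepoint entry; record the pending entry once a different state
// shows up at a later PC.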
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Also returns the caller_bci for the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

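// Emit the pending non-safepoint entry: one scope description per frame,
// walking the call chain from the outermost caller down to the current method.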
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

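// Emit a Java call: align the call site on MP systems so it can be patched
// atomically, emit the static call stub out of line, then dispatch on the
// call kind.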
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}

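// Dispatch a one-operand LIR op to the matching platform-specific emitter.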
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

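// Dispatch a zero-operand LIR op: frame setup, entry points, FPU state,
// memory barriers and the like.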
void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}

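// Dispatch a two-operand LIR op: compares, conditional moves, shifts,
// arithmetic and intrinsic ops, logic, throws, and atomics.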
void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}

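// Dispatch a move based on the kinds of its source and destination operands
// (register, stack slot, constant, or memory address).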
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src,  dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}

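// With VerifyOops, check every oop recorded in the op's oop map, whether it
// lives in a register or in a stack slot.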
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}
868