c1_LIRAssembler.cpp revision 6760:22b98ab2a69f
/*
 * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"

void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that the call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
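  // In a debug build, check that the bytecode at the patch site is one that can
  // legitimately require this kind of patching stub.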
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
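  // A bytecode with an optional appendix (e.g. an invokehandle call site) patches in
  // the resolved appendix; all other sites handled here patch in the class mirror.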
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
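  // Bail out of the compilation when the remaining space in the code buffer drops
  // below a small reserve (1K on 32-bit, 2K on 64-bit builds).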
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
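        // The entry code always ends in a branch to the handler block (see the assert
        // above).  If that branch is all there is, enter the handler block directly;
        // otherwise emit the adapter code here and record its offset.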
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phi's
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
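  // Let the platform-specific back end peephole-optimize the LIR list before
  // emitting code for it.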
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
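  // If this op comes from the same instruction as the pending non-safepoint entry,
  // just extend that entry to the current pc instead of starting a new one.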
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Returns the caller_bci for the next-younger state, also.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
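  // Walk s and t up their caller chains in lockstep until t reaches the oldest
  // state; s then stops n frames above the oldest state, i.e. at the nth-oldest.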
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub stuff out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
  case lir_dynamic_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default:
    fatal(err_msg_res("unexpected op code: %s", op->name()));
    break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it up.
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
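      // If debug info was already recorded at the current pc, emit a nop so that
      // the safepoint poll gets a pc offset of its own.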
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
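      // Standard method entry: record the entry offsets, check the inline cache for
      // non-static methods, then emit the verified entry point and set up the frame.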
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
    case lir_exp:
    case lir_pow:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src,  dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}
867