c1_LIRAssembler.cpp revision 1472:c18cbe5936b8
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRAssembler.cpp.incl"

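// A note inferred from the code below: patching_epilog pads the patch site
// with nops until it spans at least NativeCall::instruction_size bytes, so
// the PatchingStub can later overwrite the site with a native call. The
// ASSERT block merely whitelists the bytecodes that may legitimately require
// field-access or class-load patching.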
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}


//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}

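// Bail out of the compilation when the code buffer is nearly full: callers
// invoke this before emitting each stub or LIR op, keeping roughly 1K of
// slack as a safety margin.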
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(1*K)) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

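// Emit the accumulated out-of-line stubs. Buffer space and a pending bailout
// are checked before each stub; in non-product builds every stub is preceded
// by a block comment naming its slow case.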
void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}

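// Only non-static methods get an inline-cache check: presumably because a
// static call has no receiver whose class would need to be verified at the
// entry point.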
bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

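// Ensure every exception handler has a known pc offset: either emit its
// pending adapter code now, or fall back to the handler block's own
// exception_handler_pco.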
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}

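// Top-level driver: emit every block in the given order, then flush any
// pending non-safepoint debug info up to the final code offset.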
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}

void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print out every op since that's too verbose.  Print
      // branches since they include block and stub names.  Also print
      // patching moves since they generate funny looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------

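// Record debug info for a branch that doubles as a safepoint poll: a
// poll_type relocation is attached at the current pc before the oop map and
// any exception handlers are recorded.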
void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo, bool is_method_handle_invoke) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset, is_method_handle_invoke);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->lock_stack();
}

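// Coalesce debug info for non-safepoint ops: consecutive ops sharing a
// source instruction (or an equivalent bci and value stack) extend the
// pending record; once an unrelated op begins emitting, the pending info is
// flushed via record_non_safepoint_debug_info().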
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (_pending_non_safepoint->bci() == src->bci() &&
        debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Return NULL if n is too large.
// Also returns, via bci_result, the caller bci for the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = s->scope()->caller_bci();
    s = s->caller_state();
  }
}

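// Flush the pending non-safepoint record: describe every scope from the
// outermost caller down to the scope of the pending instruction.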
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = _pending_non_safepoint->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt
    debug_info->describe_scope(pc_offset, scope->method(), s_bci, false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}

void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}

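// Emit a Java call. Ordering matters here: the SP is preserved around
// MethodHandle invokes (JSR 292), call sites are aligned on MP systems so
// they can be patched atomically, and the static call stub is emitted out of
// line before the call itself.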
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // JSR 292
  // Preserve the SP over MethodHandle call sites.
  if (op->is_method_handle_invoke()) {
    preserve_SP(op);
  }

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
  case lir_dynamic_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default: ShouldNotReachHere();
  }

  if (op->is_method_handle_invoke()) {
    restore_SP(op);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it
  if (UseSSE < 2) {
    int i;
    for ( i = 1; i <= 7 ; i++ ) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind (*(op->label()));
}

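// Dispatch a one-operand LIR op to the platform-specific emitter. For
// lir_safepoint a nop is inserted first when debug info was already recorded
// at this offset, so the poll gets a pc of its own.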
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}

void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack (src, dest, src->type(), pop_fpu_stack);
}

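// Generic move dispatcher: picks the concrete move routine from the source
// and destination operand kinds. The asserts encode which combinations may
// carry a patch code or CodeEmitInfo; only constant->register,
// register->memory, and memory->register moves can be patched here.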
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src,  dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, unaligned);

  } else {
    ShouldNotReachHere();
  }
}

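// Debug-only sanity check: when VerifyOopMaps or VerifyOops is set, walk the
// oop map and emit verification code for every recorded oop, whether it
// lives in a register or in a stack slot.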
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      s.next();
    }
    VerifyOops = v;
  }
#endif
}