c1_LIRAssembler.cpp revision 1879:f95d63e2154a
/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif


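// Pad the patch site with nops until the code emitted since
// patch->pc_start() is at least as large as a native call instruction,
// then install the PatchingStub over the site and queue it for emission.
// The ASSERT block cross-checks the stub kind against the bytecode that
// required the patch.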
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // we must have enough patching space so that a call can be inserted
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeCall::instruction_size) {
    _masm->nop();
  }
  patch->install(_masm, patch_code, obj, info);
  append_patching_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else {
    ShouldNotReachHere();
  }
#endif
}


//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _compilation(c)
 , _masm(c->masm())
 , _bs(Universe::heap()->barrier_set())
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
}


void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
  _slow_case_stubs->append(stub);
}


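// Bail out of the compilation when less than 1K of space remains in the
// current code section. This is checked before every LIR op and stub is
// emitted, so individual ops effectively have at least that much headroom.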
void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
  if (cs->remaining() < (int)(1*K)) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::emit_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = (*stub_list)[m];

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}


int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}


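// Emit the out-of-line exception adapter code that LinearScan attached to
// exception handlers. A handler whose entry code contains more than just
// its terminating branch gets an adapter emitted here; otherwise its entry
// pco is simply the handler block's own entry point.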
void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}


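// Top-level driver: emit every block in code-emission order, then flush
// any pending (non-safepoint) debug info and, in debug builds, verify
// that all branch target labels were bound.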
void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}


void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


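// Emit a single LIR list: run the platform-specific peephole pass first,
// then emit each op in order, checking for code-buffer overflow and
// bailouts between ops and feeding ops to the non-safepoint debug info
// machinery when the recorder is collecting it.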
void LIR_Assembler::emit_lir_list(LIR_List* list) {
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op, since that's too verbose.  Print
      // branches, since they include block and stub names, and also
      // patching moves, since they generate funny-looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  _masm->code_section()->relocate(pc(), relocInfo::poll_type);
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

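// Non-safepoint debug info is recorded lazily: while consecutive ops map
// to the same source instruction (or the same ValueStack), only
// _pending_non_safepoint_offset is advanced; the pending entry is flushed
// by record_non_safepoint_debug_info() once an op with different debug
// info shows up at a later pc.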
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL)  return;
  int pc_offset = code_offset();
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL)  return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info.  Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index the caller states of s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.  Also returns, via bci_result, the
// caller bci of the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL)  break;
    t = t->caller_state();
  }
  if (t == NULL)  return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL)  return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

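// Flush the pending non-safepoint entry: open a non-safepoint record at
// _pending_non_safepoint_offset and describe every scope of the pending
// state from the oldest caller inward (for A calls B calls C, the scopes
// are described in the order A, B, C).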
void LIR_Assembler::record_non_safepoint_debug_info() {
  int         pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack    = debug_info(_pending_non_safepoint);
  int         bci       = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL)  break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute, since these ScopeDescs are never used for deopt.
    debug_info->describe_scope(pc_offset, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  add_debug_info_for_null_check(code_offset(), cinfo);
}

void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  emit_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


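// Emit a Java call. On MP systems the site is aligned first so it can be
// patched atomically later; the static call stub is emitted out of line
// before the call itself, and the call is then dispatched on its LIR code
// to the matching relocation/call flavor below.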
void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  if (os::is_MP()) {
    // must align call sites, otherwise they can't be updated atomically on MP hardware
    align_call(op->code());
  }

  // emit the static call stub out of line
  emit_static_call_stub();

  switch (op->code()) {
  case lir_static_call:
    call(op, relocInfo::static_call_type);
    break;
  case lir_optvirtual_call:
  case lir_dynamic_call:
    call(op, relocInfo::opt_virtual_call_type);
    break;
  case lir_icvirtual_call:
    ic_call(op);
    break;
  case lir_virtual_call:
    vtable_call(op);
    break;
  default: ShouldNotReachHere();
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it.
  if (UseSSE < 2) {
    for (int i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}


void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        add_debug_info_for_null_check_here(op->info());

        if (op->in_opr()->is_single_cpu()) {
          _masm->null_check(op->in_opr()->as_register());
        } else {
          Unimplemented();
        }
      }
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      while (code_offset() % BytesPerWord != 0) {
        _masm->nop();
      }
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}


void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be codeemitinfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_sin:
    case lir_tan:
    case lir_cos:
    case lir_log:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}


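// Central dispatch for moves, keyed on the source/destination operand kinds:
//   register -> register | stack | address   (reg2reg, reg2stack, reg2mem)
//   stack    -> register | stack             (stack2reg, stack2stack)
//   constant -> register | stack | address   (const2reg, const2stack, const2mem)
//   address  -> register                     (mem2reg)
// Only const2reg, reg2mem and mem2reg accept a patch code; all other
// combinations assert lir_patch_none.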
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, unaligned);

  } else {
    ShouldNotReachHere();
  }
}


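// Debug-only sanity check: when VerifyOopMaps or VerifyOops is set, walk
// the oop map attached to this CodeEmitInfo and verify each value marked
// as an oop, whether it lives in a register or in a stack slot.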
void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOopMaps || VerifyOops) {
    bool v = VerifyOops;
    VerifyOops = true;
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue ov = s.current();
      if (ov.is_oop()) {
        VMReg r = ov.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), strdup(st.as_string()), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      s.next();
    }
    VerifyOops = v;
  }
#endif
}
835