// os_linux_aarch64.cpp revision 10835:a6b1b83401c7
/*
 * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_linux.h"
#include "memory/allocation.inline.hpp"
#include "mutex_linux.inline.hpp"
#include "os_share_linux.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#ifdef BUILTIN_SIM
#include "../../../../../../simulator/simulator.hpp"
#endif

// put OS-includes here
# include <sys/types.h>
# include <sys/mman.h>
# include <pthread.h>
# include <signal.h>
# include <errno.h>
# include <dlfcn.h>
# include <stdlib.h>
# include <stdio.h>
# include <unistd.h>
# include <sys/resource.h>
# include <pthread.h>
# include <sys/stat.h>
# include <sys/time.h>
# include <sys/utsname.h>
# include <sys/socket.h>
# include <sys/wait.h>
# include <pwd.h>
# include <poll.h>
# include <ucontext.h>
# include <fpu_control.h>

#ifdef BUILTIN_SIM
#define REG_SP REG_RSP
#define REG_PC REG_RIP
#define REG_FP REG_RBP
#define SPELL_REG_SP "rsp"
#define SPELL_REG_FP "rbp"
#else
#define REG_FP 29

#define SPELL_REG_SP "sp"
#define SPELL_REG_FP "x29"
#endif

address os::current_stack_pointer() {
  register void *esp __asm__ (SPELL_REG_SP);
  return (address) esp;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).

  return (char*) 0xffffffffffff;
}

void os::initialize_thread(Thread *thr) {
}

address os::Linux::ucontext_get_pc(const ucontext_t * uc) {
#ifdef BUILTIN_SIM
  return (address)uc->uc_mcontext.gregs[REG_PC];
#else
  return (address)uc->uc_mcontext.pc;
#endif
}

void os::Linux::ucontext_set_pc(ucontext_t * uc, address pc) {
#ifdef BUILTIN_SIM
  uc->uc_mcontext.gregs[REG_PC] = (intptr_t)pc;
#else
  uc->uc_mcontext.pc = (intptr_t)pc;
#endif
}

intptr_t* os::Linux::ucontext_get_sp(const ucontext_t * uc) {
#ifdef BUILTIN_SIM
  return (intptr_t*)uc->uc_mcontext.gregs[REG_SP];
#else
  return (intptr_t*)uc->uc_mcontext.sp;
#endif
}

intptr_t* os::Linux::ucontext_get_fp(const ucontext_t * uc) {
#ifdef BUILTIN_SIM
  return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
#else
  return (intptr_t*)uc->uc_mcontext.regs[REG_FP];
#endif
}

// For Forte Analyzer AsyncGetCallTrace profiling support - thread
// is currently interrupted by SIGPROF.
// os::Solaris::fetch_frame_from_ucontext() tries to skip nested signal
// frames. Currently we don't do that on Linux, so it's the same as
// os::fetch_frame_from_context().
ExtendedPC os::Linux::fetch_frame_from_ucontext(Thread* thread,
  const ucontext_t* uc, intptr_t** ret_sp, intptr_t** ret_fp) {

  assert(thread != NULL, "just checking");
  assert(ret_sp != NULL, "just checking");
  assert(ret_fp != NULL, "just checking");

  return os::fetch_frame_from_context(uc, ret_sp, ret_fp);
}

ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  const ucontext_t* uc = (const ucontext_t*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC(os::Linux::ucontext_get_pc(uc));
    if (ret_sp) *ret_sp = os::Linux::ucontext_get_sp(uc);
    if (ret_fp) *ret_fp = os::Linux::ucontext_get_fp(uc);
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// By default, gcc always saves the frame pointer rfp on the stack. This
// may get turned off by -fomit-frame-pointer.
frame os::get_sender_for_C_frame(frame* fr) {
#ifdef BUILTIN_SIM
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
#else
  return frame(fr->link(), fr->link(), fr->sender_pc());
#endif
}

intptr_t* _get_previous_fp() {
  register intptr_t **ebp __asm__ (SPELL_REG_FP);
  return (intptr_t*) *ebp;   // we want what it points to.
}


frame os::current_frame() {
  intptr_t* fp = _get_previous_fp();
  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

// Utility functions

// From IA32 System Programming Guide
enum {
  trap_page_fault = 0xE
};

#ifdef BUILTIN_SIM
extern "C" void Fetch32PFI () ;
extern "C" void Fetch32Resume () ;
extern "C" void FetchNPFI () ;
extern "C" void FetchNResume () ;
#endif

// An operation in Unsafe has faulted.  We're going to return to the
// instruction after the faulting load or store.  We also set
// pending_unsafe_access_error so that at some point in the future our
// user will get a helpful message.
static address handle_unsafe_access(JavaThread* thread, address pc) {
  // pc is the instruction which we must emulate;
  // doing a no-op is fine: return garbage from the load.
  // Therefore, compute npc, the address of the next instruction.
  address npc = pc + NativeCall::instruction_size;

  // request an async exception
  thread->set_pending_unsafe_access_error();

  // return address of next instruction to execute
  return npc;
}
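
// Illustrative sketch, not part of the upstream file: the "skip to the next
// instruction" step above relies on A64 instructions having a fixed 4-byte
// encoding, which NativeCall::instruction_size is expected to match on this
// port. The helper name below is hypothetical.
#ifdef ASSERT
static void verify_unsafe_access_step_width() {
  // If this width were ever wrong, handle_unsafe_access() would resume at a
  // misaligned pc instead of at the instruction after the faulting access.
  assert(NativeCall::instruction_size == 4, "expected fixed 4-byte A64 instructions");
}
#endif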

extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = Thread::current_or_null_safe();

  // Must do this before SignalHandlerMark: if crash protection is installed
  // we will longjmp away (no destructors can be run).
  os::WatcherThreadCrashProtection::check_crash_protection(sig, t);

  SignalHandlerMark shm(t);

  // Note: it's not uncommon that JNI code uses signal/sigset to install and
  // then restore certain signal handlers (e.g. to temporarily block SIGPIPE,
  // or to have a SIGILL handler when detecting CPU type). When that happens,
  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
  // avoid an unnecessary crash when libjsig is not preloaded, try to handle
  // signals that do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      // Ignoring SIGPIPE/SIGXFSZ - see bugs 4229104 or 6499219
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if (t->is_VM_thread()) {
        vmthread = (VMThread *)t;
      }
    }
  }
/*
  NOTE: does not seem to work on linux.
  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }
*/
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc          = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

#ifdef BUILTIN_SIM
    if (pc == (address) Fetch32PFI) {
       uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
       return 1 ;
    }
    if (pc == (address) FetchNPFI) {
       uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ;
       return 1 ;
    }
#else
    if (StubRoutines::is_safefetch_fault(pc)) {
      os::Linux::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
      return 1;
    }
#endif

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (thread->on_local_stack(addr)) {
        // stack overflow
        if (thread->in_stack_yellow_reserved_zone(addr)) {
          thread->disable_stack_yellow_reserved_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception.  Guard pages will be reenabled
            // while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code.  Return and try to finish.
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation.  Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");

          // This is a likely cause, but hard to verify. Let's just print
          // it as a hint.
          tty->print_raw_cr("Please check if any of your loaded .so files has "
                            "enabled executable stack (see man page execstack(8))");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
             thread->osthread()->set_expanding_stack();
             if (os::Linux::manually_expand_stack(thread, addr)) {
               thread->osthread()->clear_expanding_stack();
               return 1;
             }
             thread->osthread()->clear_expanding_stack();
          } else {
             fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          stub = handle_unsafe_access(thread, pc);
        }
      } else if (sig == SIGFPE &&
                 (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc,
                 SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
                 !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
        // Determination of interpreter/vtable stub/compiled code null exception
        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
      stub = handle_unsafe_access(thread, pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the process of
    // write-protecting the memory serialization page. It write-enables
    // the page immediately after protecting it, so we can just return
    // to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block the current thread until the memory serialize page permission is restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

    os::Linux::ucontext_set_pc(uc, stub);
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // the caller wants another chance, so give it one
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError::report_and_die(t, sig, pc, info, ucVoid);

  ShouldNotReachHere();
  return true; // Mute compiler
}
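
// Illustrative sketch, not part of this file: a process that embeds the VM and
// installs its own SIGSEGV handler can forward faults to the handler above,
// passing abort_if_unrecognized = 0 so the VM declines quietly when the fault
// is not one of its own. The handler and installer names below are an assumed
// usage pattern, not upstream code.
static void app_sigsegv_handler(int sig, siginfo_t* info, void* ucVoid) {
  if (JVM_handle_linux_signal(sig, info, ucVoid, /*abort_if_unrecognized*/ 0)) {
    return;  // the VM recognized and handled the fault
  }
  // ... application-specific handling for faults the VM did not claim ...
}

static void install_app_sigsegv_handler_sketch() {
  struct sigaction sa;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO | SA_RESTART;
  sa.sa_sigaction = app_sigsegv_handler;
  sigaction(SIGSEGV, &sa, NULL);
}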

void os::Linux::init_thread_fpu_state(void) {
}

int os::Linux::get_fpu_control_word(void) {
  return 0;
}

void os::Linux::set_fpu_control_word(int fpu_control) {
}

// Check that the linux kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.
bool os::supports_sse() {
  return true;
}

bool os::is_allocatable(size_t bytes) {
  return true;
}

////////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Linux::min_stack_allowed  = 64 * K;

// return default stack size for thr_type
size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
  // default stack size (compiler thread needs larger stack)
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
  return s;
}

size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Creating a guard page is very expensive. Java threads have HotSpot
  // guard pages, so only enable the glibc guard page for non-Java threads.
  return (thr_type == java_thread ? 0 : page_size());
}
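
// Illustrative sketch, not upstream code: a guard size computed as above is
// normally applied with pthread_attr_setguardsize() before pthread_create();
// for this port the real call site lives in os_linux.cpp. The helper name
// below is hypothetical.
static int set_glibc_guard_size_sketch(pthread_attr_t* attr, os::ThreadType thr_type) {
  // 0 disables the glibc guard page (Java threads rely on HotSpot's own
  // yellow/red guard zones instead); non-Java threads get one page.
  return pthread_attr_setguardsize(attr, os::Linux::default_guard_size(thr_type));
}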

// Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\  JavaThread created by VM does not have glibc
//    |    glibc guard page    | - guard, attached Java thread usually has
//    |                        |/  1 page glibc guard.
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |  HotSpot Guard Pages   | - red and yellow pages
//    |                        |/
//    +------------------------+ JavaThread::stack_yellow_zone_base()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// Non-Java thread:
//
//   Low memory addresses
//    +------------------------+
//    |                        |\
//    |  glibc guard page      | - usually 1 page
//    |                        |/
// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
//    |                        |\
//    |      Normal Stack      | -
//    |                        |/
// P2 +------------------------+ Thread::stack_base()
//
// ** P1 (aka bottom) and size (P2 = P1 + size) are the address and stack size
//    returned from pthread_attr_getstack()

static void current_stack_region(address * bottom, size_t * size) {
  if (os::Linux::is_initial_thread()) {
     // The initial thread needs special handling because pthread_getattr_np()
     // may return a bogus value.
     *bottom = os::Linux::initial_thread_stack_bottom();
     *size   = os::Linux::initial_thread_stack_size();
  } else {
     pthread_attr_t attr;

     int rslt = pthread_getattr_np(pthread_self(), &attr);

     // The JVM needs to know the exact stack location; abort if the query fails.
     if (rslt != 0) {
       if (rslt == ENOMEM) {
         vm_exit_out_of_memory(0, OOM_MMAP_ERROR, "pthread_getattr_np");
       } else {
         fatal("pthread_getattr_np failed with errno = %d", rslt);
       }
     }

     if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
         fatal("Can not locate current stack attributes!");
     }

     pthread_attr_destroy(&attr);

  }
  assert(os::current_stack_pointer() >= *bottom &&
         os::current_stack_pointer() < *bottom + *size, "just checking");
}

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return (bottom + size);
}

size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}
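
// Illustrative sketch, not upstream code: how the P1/P2 labels in the diagram
// above relate to the two accessors just defined. The helper name is
// hypothetical; it only restates the arithmetic used by current_stack_region().
static void stack_region_math_sketch() {
  address p2   = os::current_stack_base();   // P2 in the diagram (high address)
  size_t  size = os::current_stack_size();
  address p1   = p2 - size;                  // P1 (aka bottom), so P2 = P1 + size
  assert(p1 <= os::current_stack_pointer() && os::current_stack_pointer() < p2,
         "sp should lie inside [P1, P2)");
}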

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;
  st->print_cr("Registers:");
#ifdef BUILTIN_SIM
  st->print(  "RAX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RAX]);
  st->print(", RBX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBX]);
  st->print(", RCX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RCX]);
  st->print(", RDX=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDX]);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSP]);
  st->print(", RBP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RBP]);
  st->print(", RSI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RSI]);
  st->print(", RDI=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RDI]);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R8]);
  st->print(", R9 =" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R9]);
  st->print(", R10=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R10]);
  st->print(", R11=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R11]);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R12]);
  st->print(", R13=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R13]);
  st->print(", R14=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R14]);
  st->print(", R15=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_R15]);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_RIP]);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_EFL]);
  st->print(", CSGSFS=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_CSGSFS]);
  st->print(", ERR=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_ERR]);
  st->cr();
  st->print("  TRAPNO=" INTPTR_FORMAT, uc->uc_mcontext.gregs[REG_TRAPNO]);
  st->cr();
#else
  for (int r = 0; r < 31; r++)
    st->print_cr(  "R%d=" INTPTR_FORMAT, r, (size_t)uc->uc_mcontext.regs[r]);
#endif
  st->cr();

  intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp));
  print_hex_dump(st, (address)sp, (address)(sp + 8*sizeof(intptr_t)), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = os::Linux::ucontext_get_pc(uc);
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc));
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
}

void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const ucontext_t *uc = (const ucontext_t*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is horrendously verbose but the layout of the registers in the
  // context does not match how we defined our abstract Register set, so
  // we can't just iterate through the gregs area

  // this is only for the "general purpose" registers

#ifdef BUILTIN_SIM
  st->print("RAX="); print_location(st, uc->uc_mcontext.gregs[REG_RAX]);
  st->print("RBX="); print_location(st, uc->uc_mcontext.gregs[REG_RBX]);
  st->print("RCX="); print_location(st, uc->uc_mcontext.gregs[REG_RCX]);
  st->print("RDX="); print_location(st, uc->uc_mcontext.gregs[REG_RDX]);
  st->print("RSP="); print_location(st, uc->uc_mcontext.gregs[REG_RSP]);
  st->print("RBP="); print_location(st, uc->uc_mcontext.gregs[REG_RBP]);
  st->print("RSI="); print_location(st, uc->uc_mcontext.gregs[REG_RSI]);
  st->print("RDI="); print_location(st, uc->uc_mcontext.gregs[REG_RDI]);
  st->print("R8 ="); print_location(st, uc->uc_mcontext.gregs[REG_R8]);
  st->print("R9 ="); print_location(st, uc->uc_mcontext.gregs[REG_R9]);
  st->print("R10="); print_location(st, uc->uc_mcontext.gregs[REG_R10]);
  st->print("R11="); print_location(st, uc->uc_mcontext.gregs[REG_R11]);
  st->print("R12="); print_location(st, uc->uc_mcontext.gregs[REG_R12]);
  st->print("R13="); print_location(st, uc->uc_mcontext.gregs[REG_R13]);
  st->print("R14="); print_location(st, uc->uc_mcontext.gregs[REG_R14]);
  st->print("R15="); print_location(st, uc->uc_mcontext.gregs[REG_R15]);
#else
  for (int r = 0; r < 31; r++)
    st->print_cr(  "R%d=" INTPTR_FORMAT, r, (uintptr_t)uc->uc_mcontext.regs[r]);
#endif
  st->cr();
}

void os::setup_fpu() {
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
}
#endif

int os::extra_bang_size_in_bytes() {
  // AArch64 does not require the additional stack bang.
  return 0;
}

extern "C" {
  int SpinPause() {
    return 0;
  }

  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
    if (from > to) {
      jshort *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jshort *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
  void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
    if (from > to) {
      jint *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jint *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
  void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
    if (from > to) {
      jlong *end = from + count;
      while (from < end)
        os::atomic_copy64(from++, to++);
    }
    else if (from < to) {
      jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        os::atomic_copy64(from--, to--);
    }
  }

  void _Copy_arrayof_conjoint_bytes(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count);
  }
  void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
    memmove(to, from, count * 2);
  }
  void _Copy_arrayof_conjoint_jints(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count * 4);
  }
  void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
    memmove(to, from, count * 8);
  }
};
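
// Illustrative sketch, not upstream code: the *_conjoint_* routines above have
// memmove semantics, i.e. they handle overlapping source and destination
// ranges by copying element-wise in whichever direction preserves the data.
// The helper name is hypothetical.
static void conjoint_copy_sketch() {
  jint buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  // Shift elements 0..5 up by two slots; the ranges overlap.
  _Copy_conjoint_jints_atomic(buf, buf + 2, 6);
  // buf is now {0, 1, 0, 1, 2, 3, 4, 5}.
}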