os_windows_x86.cpp revision 13524:38ff008318c3
/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_windows.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "unwind_windows_x86.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#include "windbghelp.hpp"


#undef REG_SP
#undef REG_FP
#undef REG_PC
#ifdef AMD64
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#else
#define REG_SP Esp
#define REG_FP Ebp
#define REG_PC Eip
#endif // AMD64

extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );

// Install a win32 structured exception handler around thread.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, JavaCallArguments* args, Thread* thread) {
  __try {

#ifndef AMD64
    // We store the current thread in this wrapperthread location
    // and determine how far away this address is from the structured
    // exception pointer that FS:[0] points to.  This get_thread
    // code can then get the thread pointer via FS.
    //
    // Warning:  This routine must NEVER be inlined since we'd end up with
    //           multiple offsets.
    //
    volatile Thread* wrapperthread = thread;

    if (os::win32::get_thread_ptr_offset() == 0) {
      int thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov thread_ptr_offset, eax
      };
      os::win32::set_thread_ptr_offset(thread_ptr_offset);
    }
#ifdef ASSERT
    // Verify that the offset hasn't changed since we initially captured
    // it. This might happen if we accidentally ended up with an
    // inlined version of this routine.
    else {
      int test_thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov test_thread_ptr_offset, eax
      };
      assert(test_thread_ptr_offset == os::win32::get_thread_ptr_offset(),
             "thread pointer offset from SEH changed");
    }
#endif // ASSERT
#endif // !AMD64

    f(value, method, args, thread);
  } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
      // Nothing to do.
  }
}
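
// For reference: once the offset above has been captured, the current
// Thread* can be recovered from the SEH chain head that FS:[0] points to.
// A rough sketch of what generated get_thread code amounts to (illustrative
// only -- the actual sequence is emitted by the assembler elsewhere):
//
//   mov eax, dword ptr FS:[0H]                    ; head of the SEH chain
//   mov eax, dword ptr [eax + thread_ptr_offset]  ; Thread* at a fixed delta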

#ifdef AMD64

// This is the language-specific handler for exceptions
// originating from dynamically generated code.
// We call the standard structured exception handler.
// We only expect continued execution since we cannot unwind
// from generated code.
LONG HandleExceptionFromCodeCache(
  IN PEXCEPTION_RECORD ExceptionRecord,
  IN ULONG64 EstablisherFrame,
  IN OUT PCONTEXT ContextRecord,
  IN OUT PDISPATCHER_CONTEXT DispatcherContext) {
  EXCEPTION_POINTERS ep;
  LONG result;

  ep.ExceptionRecord = ExceptionRecord;
  ep.ContextRecord = ContextRecord;

  result = topLevelExceptionFilter(&ep);

  // We should only ever get EXCEPTION_CONTINUE_EXECUTION from our handler
  // since we don't have unwind information registered.

  guarantee( result == EXCEPTION_CONTINUE_EXECUTION,
             "Unexpected result from topLevelExceptionFilter");

  return(ExceptionContinueExecution);
}


// Structure containing the Windows data structures required
// to register our code cache exception handler.
// We put these in the CodeCache since the API requires that
// all addresses in these structures be relative to the code
// area registered with RtlAddFunctionTable.
typedef struct {
  char ExceptionHandlerInstr[16];  // jmp HandleExceptionFromCodeCache
  RUNTIME_FUNCTION rt;
  UNWIND_INFO_EH_ONLY unw;
} DynamicCodeData, *pDynamicCodeData;
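
// Note on the layout above: ExceptionHandlerInstr is a small thunk slot at
// the start of the structure; register_code_area() below assembles an
// absolute jump to HandleExceptionFromCodeCache into it. The 16 bytes are
// simply a comfortable upper bound for that jump sequence (an assumption
// about the emitted encoding, not a documented requirement).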

#endif // AMD64
//
// Register our CodeCache area with the OS so it will dispatch exceptions
// to our topLevelExceptionFilter when we take an exception in our
// dynamically generated code.
//
// Arguments:  low and high are the addresses of the full reserved
// CodeCache area
//
bool os::register_code_area(char *low, char *high) {
#ifdef AMD64

  ResourceMark rm;

  pDynamicCodeData pDCD;
  PRUNTIME_FUNCTION prt;
  PUNWIND_INFO_EH_ONLY punwind;

  BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
  CodeBuffer cb(blob);
  MacroAssembler* masm = new MacroAssembler(&cb);
  pDCD = (pDynamicCodeData) masm->pc();

  masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache));
  masm->flush();

  // Create an Unwind Structure specifying no unwind info
  // other than an Exception Handler
  punwind = &pDCD->unw;
  punwind->Version = 1;
  punwind->Flags = UNW_FLAG_EHANDLER;
  punwind->SizeOfProlog = 0;
  punwind->CountOfCodes = 0;
  punwind->FrameRegister = 0;
  punwind->FrameOffset = 0;
  punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
                              (char*)low;
  punwind->ExceptionData[0] = 0;

  // This structure describes the covered dynamic code area.
  // Addresses are relative to the beginning of the code cache area
  prt = &pDCD->rt;
  prt->BeginAddress = 0;
  prt->EndAddress = (ULONG)(high - low);
  prt->UnwindData = ((char *)punwind - low);

  guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
            "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");

#endif // AMD64
  return true;
}
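
// Note: the single RUNTIME_FUNCTION entry registered above spans the whole
// reserved code cache, so the OS routes any exception whose PC falls in
// [low, high) through the jmp thunk to HandleExceptionFromCodeCache. The
// entry is never unregistered here; a teardown path would use the matching
// RtlDeleteFunctionTable(prt) call.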

void os::initialize_thread(Thread* thr) {
// Nothing to do.
}

// Atomics and Stub Functions

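// Bootstrap pattern: each os::atomic_* entry point below is a function
// pointer that starts out aimed at a *_bootstrap version. The first call
// asks StubRoutines for the generated stub; if it exists, the pointer is
// repointed at the stub and every later call goes straight to it. Before
// the stubs are generated only one thread exists, so the plain C fallback
// is safe. Sketch of the dispatch as seen from a call site (illustrative,
// not a verbatim caller):
//
//   jint result = (*os::atomic_xchg_func)(exchange_value, dest);
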
typedef jint      xchg_func_t            (jint,     volatile jint*);
typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
typedef jint      add_func_t             (jint,     volatile jint*);
typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);

#ifdef AMD64

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
  // try to use the stub:
  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());

  if (func != NULL) {
    os::atomic_xchg_ptr_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  intptr_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}


jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
  // try to use the stub:
  cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_byte_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jbyte old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#endif // AMD64

jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#ifdef AMD64

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
  // try to use the stub:
  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());

  if (func != NULL) {
    os::atomic_add_ptr_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;

#endif // AMD64

cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
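
// atomic_cmpxchg_long and its function pointer sit outside the AMD64 guard
// because a 64-bit compare-and-exchange needs a runtime-selected stub on
// 32-bit Windows as well: it cannot be done with a single 32-bit
// instruction there (the generated stub typically uses cmpxchg8b).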

#ifdef AMD64
/*
 * Windows/x64 does not use stack frames the way expected by Java:
 * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
 * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
 *     not be RBP.
 * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
 *
 * So it's not possible to print the native stack using the
 *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
 * loop in vmError.cpp. We need to roll our own loop.
 */
bool os::platform_print_native_stack(outputStream* st, const void* context,
                                     char *buf, int buf_size)
{
  CONTEXT ctx;
  if (context != NULL) {
    memcpy(&ctx, context, sizeof(ctx));
  } else {
    RtlCaptureContext(&ctx);
  }

  st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");

  STACKFRAME stk;
  memset(&stk, 0, sizeof(stk));
  stk.AddrStack.Offset    = ctx.Rsp;
  stk.AddrStack.Mode      = AddrModeFlat;
  stk.AddrFrame.Offset    = ctx.Rbp;
  stk.AddrFrame.Mode      = AddrModeFlat;
  stk.AddrPC.Offset       = ctx.Rip;
  stk.AddrPC.Mode         = AddrModeFlat;

  int count = 0;
  address lastpc = 0;
  while (count++ < StackPrintLimit) {
    intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
    intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
    address pc = (address)stk.AddrPC.Offset;

    if (pc != NULL) {
      if (count == 2 && lastpc == pc) {
        // Skip it -- StackWalk64() may return the same PC
        // (but different SP) on the first try.
      } else {
        // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
        // may not contain what Java expects, and may cause the frame() constructor
        // to crash. Let's just print out the symbolic address.
        frame::print_C_frame(st, buf, buf_size, pc);
        st->cr();
      }
      lastpc = pc;
    }

    PVOID p = WindowsDbgHelp::symFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
    if (!p) {
      // StackWalk64() can't handle this PC. Calling StackWalk64() again may cause a crash.
      break;
    }

    BOOL result = WindowsDbgHelp::stackWalk64(
        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
        GetCurrentProcess(),       // __in      HANDLE hProcess,
        GetCurrentThread(),        // __in      HANDLE hThread,
        &stk,                      // __inout   LPSTACKFRAME64 StackFrame,
        &ctx);                     // __inout   PVOID ContextRecord,

    if (!result) {
      break;
    }
  }
  if (count > StackPrintLimit) {
    st->print_cr("...<more frames>...");
  }
  st->cr();

  return true;
}
#endif // AMD64

ExtendedPC os::fetch_frame_from_context(const void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  CONTEXT* uc = (CONTEXT*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC((address)uc->REG_PC);
    if (ret_sp) *ret_sp = (intptr_t*)uc->REG_SP;
    if (ret_fp) *ret_fp = (intptr_t*)uc->REG_FP;
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(const void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// VC++ does not save the frame pointer on the stack in optimized builds.
// Frame pointer omission can be turned off with /Oy-. If we really want
// to walk C frames, we can use the StackWalk() API.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

#ifndef AMD64
// Returns an estimate of the current stack pointer. The result must be
// guaranteed to point into the calling thread's stack, and be no lower
// than the current stack pointer.
address os::current_stack_pointer() {
  int dummy;
  address sp = (address)&dummy;
  return sp;
}
#else
// Returns the current stack pointer. Accurate value needed for
// os::verify_stack_alignment().
address os::current_stack_pointer() {
  typedef address get_sp_func();
  get_sp_func* func = CAST_TO_FN_PTR(get_sp_func*,
                                     StubRoutines::x86::get_previous_sp_entry());
  return (*func)();
}
#endif
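
// Note: on x64 there is no MSVC inline assembly, so reading RSP goes through
// a generated stub. Conceptually the get_previous_sp stub is just
// "lea rax, [rsp + 8]; ret", which yields the caller's stack pointer (a
// description of the idea, not a quote of the generated code).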


#ifndef AMD64
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  // ebp (frameptr) is for this frame (_get_previous_fp). We want the ebp for the
  // caller of os::current_frame*(), so go up two frames. However, for
  // optimized builds, _get_previous_fp() will be inlined, so only go
  // up 1 frame in that case.
#ifdef _NMT_NOINLINE_
  return **(intptr_t***)frameptr;
#else
  return *frameptr;
#endif
}
#endif // !AMD64

frame os::current_frame() {

#ifdef AMD64
  // MSVC does not support __asm on Windows x64, so the frame pointer is
  // obtained via a generated stub instead.
  typedef intptr_t*      get_fp_func           ();
  get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
                                     StubRoutines::x86::get_previous_fp_entry());
  if (func == NULL) return frame();
  intptr_t* fp = (*func)();
  if (fp == NULL) {
    return frame();
  }
#else
  intptr_t* fp = _get_previous_fp();
#endif // AMD64

  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

void os::print_context(outputStream *st, const void *context) {
  if (context == NULL) return;

  const CONTEXT* uc = (const CONTEXT*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->Rax);
  st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
  st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
  st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->Rsp);
  st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
  st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
  st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->R8);
  st->print(", R9 =" INTPTR_FORMAT, uc->R9);
  st->print(", R10=" INTPTR_FORMAT, uc->R10);
  st->print(", R11=" INTPTR_FORMAT, uc->R11);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->R12);
  st->print(", R13=" INTPTR_FORMAT, uc->R13);
  st->print(", R14=" INTPTR_FORMAT, uc->R14);
  st->print(", R15=" INTPTR_FORMAT, uc->R15);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->Rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->Eax);
  st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
  st->print(", ECX=" INTPTR_FORMAT, uc->Ecx);
  st->print(", EDX=" INTPTR_FORMAT, uc->Edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->Esp);
  st->print(", EBP=" INTPTR_FORMAT, uc->Ebp);
  st->print(", ESI=" INTPTR_FORMAT, uc->Esi);
  st->print(", EDI=" INTPTR_FORMAT, uc->Edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->Eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)uc->REG_SP;
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point of an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = (address)uc->REG_PC;
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
  st->cr();
}


void os::print_register_info(outputStream *st, const void *context) {
  if (context == NULL) return;

  const CONTEXT* uc = (const CONTEXT*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RIP="); print_location(st, uc->Rip);
  st->print("RAX="); print_location(st, uc->Rax);
  st->print("RBX="); print_location(st, uc->Rbx);
  st->print("RCX="); print_location(st, uc->Rcx);
  st->print("RDX="); print_location(st, uc->Rdx);
  st->print("RSP="); print_location(st, uc->Rsp);
  st->print("RBP="); print_location(st, uc->Rbp);
  st->print("RSI="); print_location(st, uc->Rsi);
  st->print("RDI="); print_location(st, uc->Rdi);
  st->print("R8 ="); print_location(st, uc->R8);
  st->print("R9 ="); print_location(st, uc->R9);
  st->print("R10="); print_location(st, uc->R10);
  st->print("R11="); print_location(st, uc->R11);
  st->print("R12="); print_location(st, uc->R12);
  st->print("R13="); print_location(st, uc->R13);
  st->print("R14="); print_location(st, uc->R14);
  st->print("R15="); print_location(st, uc->R15);
#else
  st->print("EIP="); print_location(st, uc->Eip);
  st->print("EAX="); print_location(st, uc->Eax);
  st->print("EBX="); print_location(st, uc->Ebx);
  st->print("ECX="); print_location(st, uc->Ecx);
  st->print("EDX="); print_location(st, uc->Edx);
  st->print("ESP="); print_location(st, uc->Esp);
  st->print("EBP="); print_location(st, uc->Ebp);
  st->print("ESI="); print_location(st, uc->Esi);
  st->print("EDI="); print_location(st, uc->Edi);
#endif

  st->cr();
}

extern "C" int SpinPause() {
#ifdef AMD64
  return 0;
#else
  // pause == rep:nop
  // On systems that don't support pause, a rep:nop
  // is executed as a nop; the rep: prefix is ignored.
  _asm {
    pause;
  };
  return 1;
#endif // AMD64
}
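
// On x64 there is no inline assembly, which is why SpinPause() just returns
// 0 ("no pause executed") above. A hypothetical x64 variant could use the
// compiler intrinsic instead (sketch only, not what this build does):
//
//   #include <intrin.h>
//   extern "C" int SpinPause() { _mm_pause(); return 1; }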


void os::setup_fpu() {
#ifndef AMD64
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
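  // fldcw loads the 16-bit x87 control word; the "std" word supplied by the
  // stub routines establishes the precision and rounding modes the VM
  // expects for Java floating-point semantics.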
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  // current_stack_pointer() calls the generated get_previous_sp stub routine.
  // Only enable the assert once that routine becomes available.
  if (StubRoutines::code1() != NULL) {
    assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
  }
#endif
}
#endif

int os::extra_bang_size_in_bytes() {
  // JDK-8050147 requires the full cache line bang for x86.
  return VM_Version::L1_line_size();
}
673