os_windows_x86.cpp revision 9248:6ab7e19c9220
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "decoder_windows.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
#include "mutex_windows.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_windows.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

#include "unwind_windows_x86.hpp"
#undef REG_SP
#undef REG_FP
#undef REG_PC
#ifdef AMD64
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#else
#define REG_SP Esp
#define REG_FP Ebp
#define REG_PC Eip
#endif // AMD64
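
// The REG_* macros name the architecture-specific CONTEXT fields so that
// fetch_frame_from_context() and print_context() below can read SP, FP and
// PC without repeating the AMD64/x86 distinction at every use.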

extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS*);

// Install a win32 structured exception handler around the thread.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method, JavaCallArguments* args, Thread* thread) {
  __try {

#ifndef AMD64
    // We store the current thread in this wrapperthread location
    // and determine how far away this address is from the structured
    // exception pointer that FS:[0] points to.  This get_thread
    // code can then get the thread pointer via FS.
    //
    // Warning:  This routine must NEVER be inlined since we'd end up with
    //           multiple offsets.
    //
    volatile Thread* wrapperthread = thread;

    if ( ThreadLocalStorage::get_thread_ptr_offset() == 0 ) {
      int thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov thread_ptr_offset, eax
      };
      ThreadLocalStorage::set_thread_ptr_offset(thread_ptr_offset);
    }
#ifdef ASSERT
    // Verify that the offset hasn't changed since we initially captured
    // it. This might happen if we accidentally ended up with an
    // inlined version of this routine.
    else {
      int test_thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov test_thread_ptr_offset, eax
      };
      assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(),
             "thread pointer offset from SEH changed");
    }
#endif // ASSERT
#endif // !AMD64

    f(value, method, args, thread);
  } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
      // Nothing to do.
  }
}
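
// Illustrative sketch (assumed MASM-style syntax; not part of this file's
// build): once thread_ptr_offset has been captured above, generated 32-bit
// get_thread() code can recover the current Thread* through the SEH chain:
//
//   mov eax, fs:[0]                     ; head of the SEH chain in the TIB
//   mov eax, [eax + thread_ptr_offset]  ; wrapperthread slot holds Thread*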

#ifdef AMD64

// This is the language-specific handler for exceptions
// originating from dynamically generated code.
// We call the standard structured exception handler.
// We only expect Continued Execution since we cannot unwind
// from generated code.
LONG HandleExceptionFromCodeCache(
  IN PEXCEPTION_RECORD ExceptionRecord,
  IN ULONG64 EstablisherFrame,
  IN OUT PCONTEXT ContextRecord,
  IN OUT PDISPATCHER_CONTEXT DispatcherContext) {
  EXCEPTION_POINTERS ep;
  LONG result;

  ep.ExceptionRecord = ExceptionRecord;
  ep.ContextRecord = ContextRecord;

  result = topLevelExceptionFilter(&ep);

  // We had better only get a CONTINUE_EXECUTION from our handler
  // since we don't have unwind information registered.

  guarantee( result == EXCEPTION_CONTINUE_EXECUTION,
             "Unexpected result from topLevelExceptionFilter");

  return(ExceptionContinueExecution);
}


// Structure containing the Windows Data Structures required
// to register our Code Cache exception handler.
// We put these in the CodeCache since the API requires that all
// addresses in these structures be relative to the code area
// registered with RtlAddFunctionTable.
typedef struct {
  char ExceptionHandlerInstr[16];  // jmp HandleExceptionFromCodeCache
  RUNTIME_FUNCTION rt;
  UNWIND_INFO_EH_ONLY unw;
} DynamicCodeData, *pDynamicCodeData;
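
// Layout note: UNWIND_INFO.ExceptionHandler is a 32-bit offset from the base
// address handed to RtlAddFunctionTable, so the handler must itself reside in
// the registered code area. The ExceptionHandlerInstr slot therefore holds a
// jmp that forwards to HandleExceptionFromCodeCache, wherever the VM image
// was loaded.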

#endif // AMD64
//
// Register our CodeCache area with the OS so it will dispatch exceptions
// to our topLevelExceptionFilter when we take an exception in our
// dynamically generated code.
//
// Arguments:  low and high are the addresses bounding the full reserved
// CodeCache area
//
bool os::register_code_area(char *low, char *high) {
#ifdef AMD64

  ResourceMark rm;

  pDynamicCodeData pDCD;
  PRUNTIME_FUNCTION prt;
  PUNWIND_INFO_EH_ONLY punwind;

  BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
  CodeBuffer cb(blob);
  MacroAssembler* masm = new MacroAssembler(&cb);
  pDCD = (pDynamicCodeData) masm->pc();

  masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache));
  masm->flush();

  // Create an Unwind Structure specifying no unwind info
  // other than an Exception Handler
  punwind = &pDCD->unw;
  punwind->Version = 1;
  punwind->Flags = UNW_FLAG_EHANDLER;
  punwind->SizeOfProlog = 0;
  punwind->CountOfCodes = 0;
  punwind->FrameRegister = 0;
  punwind->FrameOffset = 0;
  punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
                              (char*)low;
  punwind->ExceptionData[0] = 0;

  // This structure describes the covered dynamic code area.
  // Addresses are relative to the beginning of the code cache area
  prt = &pDCD->rt;
  prt->BeginAddress = 0;
  prt->EndAddress = (ULONG)(high - low);
  prt->UnwindData = ((char *)punwind - low);

  guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
            "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");

#endif // AMD64
  return true;
}
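
// Hypothetical call site, for illustration only (the actual caller lives in
// the shared CodeCache initialization code):
//
//   os::register_code_area((char*)CodeCache::low_bound(),
//                          (char*)CodeCache::high_bound());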

void os::initialize_thread(Thread* thr) {
// Nothing to do.
}

// Atomics and Stub Functions

typedef jint      xchg_func_t            (jint,     volatile jint*);
typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
typedef jint      add_func_t             (jint,     volatile jint*);
typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);
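
// Bootstrap pattern: each os::atomic_* function pointer initially targets a
// *_bootstrap routine below. The bootstrap performs a plain, non-atomic
// fallback -- safe only while the VM is still single-threaded (see the
// asserts) -- and installs the generated stub as soon as StubRoutines
// publishes it, so subsequent calls go straight to the stub.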

#ifdef AMD64

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
  // try to use the stub:
  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());

  if (func != NULL) {
    os::atomic_xchg_ptr_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  intptr_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}


jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
  // try to use the stub:
  cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_byte_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jbyte old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#endif // AMD64

jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
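
// Note: the jlong compare-exchange is needed on 32-bit x86 as well, where a
// 64-bit exchange requires lock cmpxchg8b, so this bootstrap and its function
// pointer live outside the #ifdef AMD64 blocks above and below.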

#ifdef AMD64

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
  // try to use the stub:
  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());

  if (func != NULL) {
    os::atomic_add_ptr_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;

#endif // AMD64

cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

#ifdef AMD64
/*
 * Windows/x64 does not use stack frames the way expected by Java:
 * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
 * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
 *     not be RBP.
 * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
 *
 * So it's not possible to print the native stack using the
 *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
 * loop in vmError.cpp. We need to roll our own loop.
 */
bool os::platform_print_native_stack(outputStream* st, void* context,
                                     char *buf, int buf_size)
{
  CONTEXT ctx;
  if (context != NULL) {
    memcpy(&ctx, context, sizeof(ctx));
  } else {
    RtlCaptureContext(&ctx);
  }

  st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");

  STACKFRAME stk;
  memset(&stk, 0, sizeof(stk));
  stk.AddrStack.Offset    = ctx.Rsp;
  stk.AddrStack.Mode      = AddrModeFlat;
  stk.AddrFrame.Offset    = ctx.Rbp;
  stk.AddrFrame.Mode      = AddrModeFlat;
  stk.AddrPC.Offset       = ctx.Rip;
  stk.AddrPC.Mode         = AddrModeFlat;

  int count = 0;
  address lastpc = 0;
  while (count++ < StackPrintLimit) {
    intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
    intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
    address pc = (address)stk.AddrPC.Offset;

    if (pc != NULL && sp != NULL && fp != NULL) {
      if (count == 2 && lastpc == pc) {
        // Skip it -- StackWalk64() may return the same PC
        // (but different SP) on the first try.
      } else {
        // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
        // may not contain what Java expects, and may cause the frame() constructor
        // to crash. Let's just print out the symbolic address.
        frame::print_C_frame(st, buf, buf_size, pc);
        st->cr();
      }
      lastpc = pc;
    } else {
      break;
    }

    PVOID p = WindowsDbgHelp::SymFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
    if (!p) {
      // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause a crash.
      break;
    }

    BOOL result = WindowsDbgHelp::StackWalk64(
        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
        GetCurrentProcess(),       // __in      HANDLE hProcess,
        GetCurrentThread(),        // __in      HANDLE hThread,
        &stk,                      // __inout   LPSTACKFRAME64 StackFrame,
        &ctx,                      // __inout   PVOID ContextRecord,
        NULL,                      // __in_opt  PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
        WindowsDbgHelp::pfnSymFunctionTableAccess64(),
                                   // __in_opt  PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
        WindowsDbgHelp::pfnSymGetModuleBase64(),
                                   // __in_opt  PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
        NULL);                     // __in_opt  PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress

    if (!result) {
      break;
    }
  }
  if (count > StackPrintLimit) {
    st->print_cr("...<more frames>...");
  }
  st->cr();

  return true;
}
#endif // AMD64

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  CONTEXT* uc = (CONTEXT*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC((address)uc->REG_PC);
    if (ret_sp) *ret_sp = (intptr_t*)uc->REG_SP;
    if (ret_fp) *ret_fp = (intptr_t*)uc->REG_FP;
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// VC++ does not save the frame pointer on the stack in optimized builds
// (frame-pointer omission can be disabled with /Oy-). If we really want
// to walk C frames, we can use the StackWalk() API.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

#ifndef AMD64
// Returns an estimate of the current stack pointer. The result must be
// guaranteed to point into the calling thread's stack, and be no lower
// than the current stack pointer.
address os::current_stack_pointer() {
  int dummy;
  address sp = (address)&dummy;
  return sp;
}
#else
// Returns the current stack pointer. An accurate value is needed for
// os::verify_stack_alignment().
address os::current_stack_pointer() {
  typedef address get_sp_func();
  get_sp_func* func = CAST_TO_FN_PTR(get_sp_func*,
                                     StubRoutines::x86::get_previous_sp_entry());
  return (*func)();
}
#endif


#ifndef AMD64
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  // [ebp] holds the EBP saved by this function's prologue, i.e. the frame
  // pointer of our caller, so dereferencing frameptr yields the previous fp.
  return *frameptr;
}
#endif // !AMD64

frame os::current_frame() {

#ifdef AMD64
  // Inline assembly (__asm) is not supported by the Windows AMD64 compiler,
  // so we fetch the frame pointer via a generated stub instead.
  typedef intptr_t*      get_fp_func           ();
  get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
                                     StubRoutines::x86::get_previous_fp_entry());
  if (func == NULL) return frame();
  intptr_t* fp = (*func)();
  if (fp == NULL) {
    return frame();
  }
#else
  intptr_t* fp = _get_previous_fp();
#endif // AMD64

  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame();
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->Rax);
  st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
  st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
  st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->Rsp);
  st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
  st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
  st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->R8);
  st->print(", R9 =" INTPTR_FORMAT, uc->R9);
  st->print(", R10=" INTPTR_FORMAT, uc->R10);
  st->print(", R11=" INTPTR_FORMAT, uc->R11);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->R12);
  st->print(", R13=" INTPTR_FORMAT, uc->R13);
  st->print(", R14=" INTPTR_FORMAT, uc->R14);
  st->print(", R15=" INTPTR_FORMAT, uc->R15);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->Rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->Eax);
  st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
  st->print(", ECX=" INTPTR_FORMAT, uc->Ecx);
  st->print(", EDX=" INTPTR_FORMAT, uc->Edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->Esp);
  st->print(", EBP=" INTPTR_FORMAT, uc->Ebp);
  st->print(", ESI=" INTPTR_FORMAT, uc->Esi);
  st->print(", EDI=" INTPTR_FORMAT, uc->Edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->Eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)uc->REG_SP;
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point of an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = (address)uc->REG_PC;
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
  st->cr();
}


void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->Rax);
  st->print("RBX="); print_location(st, uc->Rbx);
  st->print("RCX="); print_location(st, uc->Rcx);
  st->print("RDX="); print_location(st, uc->Rdx);
  st->print("RSP="); print_location(st, uc->Rsp);
  st->print("RBP="); print_location(st, uc->Rbp);
  st->print("RSI="); print_location(st, uc->Rsi);
  st->print("RDI="); print_location(st, uc->Rdi);
  st->print("R8 ="); print_location(st, uc->R8);
  st->print("R9 ="); print_location(st, uc->R9);
  st->print("R10="); print_location(st, uc->R10);
  st->print("R11="); print_location(st, uc->R11);
  st->print("R12="); print_location(st, uc->R12);
  st->print("R13="); print_location(st, uc->R13);
  st->print("R14="); print_location(st, uc->R14);
  st->print("R15="); print_location(st, uc->R15);
#else
  st->print("EAX="); print_location(st, uc->Eax);
  st->print("EBX="); print_location(st, uc->Ebx);
  st->print("ECX="); print_location(st, uc->Ecx);
  st->print("EDX="); print_location(st, uc->Edx);
  st->print("ESP="); print_location(st, uc->Esp);
  st->print("EBP="); print_location(st, uc->Ebp);
  st->print("ESI="); print_location(st, uc->Esi);
  st->print("EDI="); print_location(st, uc->Edi);
#endif

  st->cr();
}

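// SpinPause() reports whether it actually executed a pause-style delay
// (1 = a pause was executed, 0 = no delay happened). Because the 64-bit
// Windows compiler provides no inline assembly, the AMD64 build below
// simply returns 0.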
632extern "C" int SpinPause () {
633#ifdef AMD64
634   return 0 ;
635#else
636   // pause == rep:nop
637   // On systems that don't support pause a rep:nop
638   // is executed as a nop.  The rep: prefix is ignored.
639   _asm {
640      pause ;
641   };
642   return 1 ;
643#endif // AMD64
644}
645
646
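// setup_fpu() establishes the VM's standard x87 control word (the rounding
// and precision settings the interpreter and compiled code expect) with a
// single fldcw. On AMD64 it is a no-op, since 64-bit Windows code does its
// floating point in SSE registers and needs no x87 setup here.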
void os::setup_fpu() {
#ifndef AMD64
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  // current_stack_pointer() calls the generated get_previous_sp stub routine.
  // Only enable the assert after that routine becomes available.
  if (StubRoutines::code1() != NULL) {
    assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
  }
#endif
}
#endif

int os::extra_bang_size_in_bytes() {
  // JDK-8050147 requires the full cache line bang for x86.
  return VM_Version::L1_line_size();
}