os_windows_x86.cpp revision 1887:828eafbd85cc
/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "assembler_x86.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
#include "mutex_windows.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_windows.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "thread_windows.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#include "unwind_windows_x86.hpp"
#undef REG_SP
#undef REG_FP
#undef REG_PC
#ifdef AMD64
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#else
#define REG_SP Esp
#define REG_FP Ebp
#define REG_PC Eip
#endif // AMD64

extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );

// Install a win32 structured exception handler around the thread's Java call.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  __try {

#ifndef AMD64
    // We store the current thread in this wrapperthread location
    // and determine how far away this address is from the structured
    // exception pointer that FS:[0] points to.  This get_thread
    // code can then get the thread pointer via FS.
    //
    // Warning:  This routine must NEVER be inlined since we'd end up with
    //           multiple offsets.
    //
    volatile Thread* wrapperthread = thread;

    if ( ThreadLocalStorage::get_thread_ptr_offset() == 0 ) {
      int thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov thread_ptr_offset, eax
      };
      ThreadLocalStorage::set_thread_ptr_offset(thread_ptr_offset);
    }
#ifdef ASSERT
    // Verify that the offset hasn't changed since we initially captured
    // it. This might happen if we accidentally ended up with an
    // inlined version of this routine.
    else {
      int test_thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov test_thread_ptr_offset, eax
      };
      assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(),
             "thread pointer offset from SEH changed");
    }
#endif // ASSERT
#endif // !AMD64

    f(value, method, args, thread);
  } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
      // Nothing to do.
  }
}
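
// Illustrative sketch (not part of the original file): once the offset above
// has been captured, generated get_thread-style code can recover the current
// Thread* from the SEH chain head at FS:[0].  The helper below is
// hypothetical and only demonstrates the addressing scheme:
//
//   Thread* get_thread_via_seh_offset() {
//     int tls_off = ThreadLocalStorage::get_thread_ptr_offset();
//     Thread* t;
//     __asm {
//       mov eax, dword ptr FS:[0H]   ; head of the SEH registration chain
//       add eax, tls_off             ; address of the wrapperthread slot
//       mov eax, dword ptr [eax]     ; load the stored Thread*
//       mov t, eax
//     }
//     return t;
//   }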

#ifdef AMD64

// This is the language-specific handler for exceptions
// originating from dynamically generated code.
// We call the standard structured exception handler.
// We only expect Continued Execution since we cannot unwind
// from generated code.
LONG HandleExceptionFromCodeCache(
  IN PEXCEPTION_RECORD ExceptionRecord,
  IN ULONG64 EstablisherFrame,
  IN OUT PCONTEXT ContextRecord,
  IN OUT PDISPATCHER_CONTEXT DispatcherContext) {
  EXCEPTION_POINTERS ep;
  LONG result;

  ep.ExceptionRecord = ExceptionRecord;
  ep.ContextRecord = ContextRecord;

  result = topLevelExceptionFilter(&ep);

  // We had better only get a CONTINUE_EXECUTION from our handler
  // since we don't have unwind information registered.

  guarantee( result == EXCEPTION_CONTINUE_EXECUTION,
             "Unexpected result from topLevelExceptionFilter");

  return(ExceptionContinueExecution);
}


// Structure containing the Windows Data Structures required
// to register our Code Cache exception handler.
// We put these in the CodeCache since the API requires that all
// addresses in these structures be relative to the code area
// registered with RtlAddFunctionTable.
typedef struct {
  char ExceptionHandlerInstr[16];  // jmp HandleExceptionFromCodeCache
  RUNTIME_FUNCTION rt;
  UNWIND_INFO_EH_ONLY unw;
} DynamicCodeData, *pDynamicCodeData;

#endif // AMD64
//
// Register our CodeCache area with the OS so it will dispatch exceptions
// to our topLevelExceptionFilter when we take an exception in our
// dynamically generated code.
//
// Arguments:  low and high are the addresses of the full reserved
// CodeCache area
//
bool os::register_code_area(char *low, char *high) {
#ifdef AMD64

  ResourceMark rm;

  pDynamicCodeData pDCD;
  PRUNTIME_FUNCTION prt;
  PUNWIND_INFO_EH_ONLY punwind;

  // If we are using Vectored Exceptions we don't need this registration
  if (UseVectoredExceptions) return true;

  BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
  CodeBuffer cb(blob);
  MacroAssembler* masm = new MacroAssembler(&cb);
  pDCD = (pDynamicCodeData) masm->pc();

  masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache));
  masm->flush();

  // Create an Unwind Structure specifying no unwind info
  // other than an Exception Handler
  punwind = &pDCD->unw;
  punwind->Version = 1;
  punwind->Flags = UNW_FLAG_EHANDLER;
  punwind->SizeOfProlog = 0;
  punwind->CountOfCodes = 0;
  punwind->FrameRegister = 0;
  punwind->FrameOffset = 0;
  punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
                              (char*)low;
  punwind->ExceptionData[0] = 0;

  // This structure describes the covered dynamic code area.
  // Addresses are relative to the beginning of the code cache area
  prt = &pDCD->rt;
  prt->BeginAddress = 0;
  prt->EndAddress = (ULONG)(high - low);
  prt->UnwindData = ((char *)punwind - low);

  guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
            "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");

#endif // AMD64
  return true;
}
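
// Usage sketch (illustrative; the actual caller lives outside this file):
// the runtime registers the whole reserved code cache once during startup,
// for example
//
//   os::register_code_area(CodeCache::low_bound(), CodeCache::high_bound());
//
// so that the jmp stub above lands inside the registered region and all the
// relative offsets stored in DynamicCodeData stay valid.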

void os::initialize_thread() {
// Nothing to do.
}

// Atomics and Stub Functions
//
// The bootstrap versions below are installed in the os::atomic_* function
// pointers and serve only until the corresponding StubRoutines entries have
// been generated; the first call that finds a stub patches the pointer.

typedef jint      xchg_func_t            (jint,     volatile jint*);
typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
typedef jint      add_func_t             (jint,     volatile jint*);
typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);

#ifdef AMD64

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
  // try to use the stub:
  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());

  if (func != NULL) {
    os::atomic_xchg_ptr_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  intptr_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}


jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
#endif // AMD64

jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#ifdef AMD64

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
  // try to use the stub:
  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());

  if (func != NULL) {
    os::atomic_add_ptr_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;

#endif // AMD64

cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
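
// Dispatch sketch (illustrative, not part of the original file): callers in
// the platform Atomic implementation route through these pointers, so the
// bootstrap versions above are only ever reached before the stubs exist:
//
//   inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest,
//                                jlong compare_value) {
//     return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
//   }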

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  CONTEXT* uc = (CONTEXT*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC((address)uc->REG_PC);
    if (ret_sp) *ret_sp = (intptr_t*)uc->REG_SP;
    if (ret_fp) *ret_fp = (intptr_t*)uc->REG_FP;
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}
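
// Usage sketch (illustrative): error reporting and the exception filter can
// reconstruct the faulting frame directly from a Windows CONTEXT, e.g.
//
//   frame fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);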

// VC++ does not save the frame pointer on the stack in optimized builds
// (frame pointer omission can be disabled with /Oy-). If we really want
// to walk C frames, we can use the StackWalk() API.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}

#ifndef AMD64
// Returns the caller's frame pointer: EBP holds this function's frame
// pointer, and the saved EBP it points to belongs to the caller.
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  return *frameptr;
}
#endif // !AMD64

frame os::current_frame() {

#ifdef AMD64
  // Inline assembly (__asm) is not supported by the 64-bit MSVC compiler,
  // so we read the frame pointer via a generated stub instead.
  typedef intptr_t*      get_fp_func           ();
  get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
                                     StubRoutines::x86::get_previous_fp_entry());
  if (func == NULL) return frame(NULL, NULL, NULL);
  intptr_t* fp = (*func)();
#else
  intptr_t* fp = _get_previous_fp();
#endif // AMD64

  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame(NULL, NULL, NULL);
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->Rax);
  st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
  st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
  st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->Rsp);
  st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
  st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
  st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->R8);
  st->print(", R9 =" INTPTR_FORMAT, uc->R9);
  st->print(", R10=" INTPTR_FORMAT, uc->R10);
  st->print(", R11=" INTPTR_FORMAT, uc->R11);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->R12);
  st->print(", R13=" INTPTR_FORMAT, uc->R13);
  st->print(", R14=" INTPTR_FORMAT, uc->R14);
  st->print(", R15=" INTPTR_FORMAT, uc->R15);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->Rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->Eax);
  st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
  st->print(", ECX=" INTPTR_FORMAT, uc->Ecx);
  st->print(", EDX=" INTPTR_FORMAT, uc->Edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->Esp);
  st->print(", EBP=" INTPTR_FORMAT, uc->Ebp);
  st->print(", ESI=" INTPTR_FORMAT, uc->Esi);
  st->print(", EDI=" INTPTR_FORMAT, uc->Edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->Eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)uc->REG_SP;
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point of an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = (address)uc->REG_PC;
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
  st->cr();
}


void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->Rax);
  st->print("RBX="); print_location(st, uc->Rbx);
  st->print("RCX="); print_location(st, uc->Rcx);
  st->print("RDX="); print_location(st, uc->Rdx);
  st->print("RSP="); print_location(st, uc->Rsp);
  st->print("RBP="); print_location(st, uc->Rbp);
  st->print("RSI="); print_location(st, uc->Rsi);
  st->print("RDI="); print_location(st, uc->Rdi);
  st->print("R8 ="); print_location(st, uc->R8);
  st->print("R9 ="); print_location(st, uc->R9);
  st->print("R10="); print_location(st, uc->R10);
  st->print("R11="); print_location(st, uc->R11);
  st->print("R12="); print_location(st, uc->R12);
  st->print("R13="); print_location(st, uc->R13);
  st->print("R14="); print_location(st, uc->R14);
  st->print("R15="); print_location(st, uc->R15);
#else
  st->print("EAX="); print_location(st, uc->Eax);
  st->print("EBX="); print_location(st, uc->Ebx);
  st->print("ECX="); print_location(st, uc->Ecx);
  st->print("EDX="); print_location(st, uc->Edx);
  st->print("ESP="); print_location(st, uc->Esp);
  st->print("EBP="); print_location(st, uc->Ebp);
  st->print("ESI="); print_location(st, uc->Esi);
  st->print("EDI="); print_location(st, uc->Edi);
#endif

  st->cr();
}

// SafeFetch32/SafeFetchN read a word that may live in unmapped memory; an
// access violation is swallowed by the SEH handler and Err is returned.
extern "C" int SafeFetch32 (int * adr, int Err) {
   int rv = Err ;
   __try {
       rv = *((volatile int *) adr) ;
   } __except(EXCEPTION_EXECUTE_HANDLER) {
   }
   return rv ;
}

extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t Err) {
   intptr_t rv = Err ;
   __try {
       rv = *((volatile intptr_t *) adr) ;
   } __except(EXCEPTION_EXECUTE_HANDLER) {
   }
   return rv ;
}
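
// Usage sketch (illustrative; is_readable is hypothetical): probe memory that
// may be unmapped without risking a crash, using two distinct error values to
// rule out a sentinel collision:
//
//   static bool is_readable(intptr_t* p) {
//     return SafeFetchN(p, 0) != 0 || SafeFetchN(p, 1) != 1;
//   }
//
// Only a faulting address makes both probes return their error values.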

extern "C" int SpinPause () {
#ifdef AMD64
   return 0 ;
#else
   // pause == rep:nop
   // On systems that don't support pause a rep:nop
   // is executed as a nop.  The rep: prefix is ignored.
   __asm {
      pause ;
   };
   return 1 ;
#endif // AMD64
}
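
// Usage sketch (illustrative; try_acquire and the bound are hypothetical):
// SpinPause is meant for the body of short spin-wait loops, where the pause
// hint reduces pipeline and memory-order penalties when the wait ends:
//
//   int spins = 1000;
//   while (!try_acquire() && --spins > 0) {
//     SpinPause();
//   }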

void os::setup_fpu() {
#ifndef AMD64
  // Load the standard x87 control word so FPU results match Java semantics.
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}
549