os_windows_x86.cpp revision 3718:b9a9ed0f8eeb
/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// no precompiled headers
#include "assembler_x86.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
#include "mutex_windows.inline.hpp"
#include "nativeInst_x86.hpp"
#include "os_share_windows.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/timer.hpp"
#include "thread_windows.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"

# include "unwind_windows_x86.hpp"
#undef REG_SP
#undef REG_FP
#undef REG_PC
#ifdef AMD64
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#else
#define REG_SP Esp
#define REG_FP Ebp
#define REG_PC Eip
#endif // AMD64
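
// The REG_* macros let the register-name-independent code below (e.g.
// fetch_frame_from_context and print_context) access the Windows CONTEXT
// record uniformly: uc->REG_PC expands to uc->Rip on AMD64 and to uc->Eip
// on 32-bit x86, and likewise for the stack and frame pointers.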

extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );

// Install a win32 structured exception handler around the thread's call
// into Java code.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  __try {

#ifndef AMD64
    // We store the current thread in this wrapperthread location
    // and determine how far away this address is from the structured
    // exception pointer that FS:[0] points to.  This get_thread
    // code can then get the thread pointer via FS.
    //
    // Warning:  This routine must NEVER be inlined since we'd end up with
    //           multiple offsets.
    //
    volatile Thread* wrapperthread = thread;

    if ( ThreadLocalStorage::get_thread_ptr_offset() == 0 ) {
      int thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov thread_ptr_offset, eax
      };
      ThreadLocalStorage::set_thread_ptr_offset(thread_ptr_offset);
    }
#ifdef ASSERT
    // Verify that the offset hasn't changed since we initially captured
    // it. This might happen if we accidentally ended up with an
    // inlined version of this routine.
    else {
      int test_thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov test_thread_ptr_offset, eax
      };
      assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(),
             "thread pointer offset from SEH changed");
    }
#endif // ASSERT
#endif // !AMD64

    f(value, method, args, thread);
  } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
      // Nothing to do.
  }
}
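
// How the captured offset is used: FS:[0] points at the current thread's
// topmost SEH registration record, which lives in this wrapper's stack
// frame at a fixed distance from the wrapperthread local (fixed precisely
// because the wrapper is never inlined). Generated get_thread code can
// therefore recover the Thread* with two loads; an illustrative (not
// verbatim) sequence:
//
//   mov eax, fs:[0]                     ; SEH record in this wrapper frame
//   mov eax, [eax + thread_ptr_offset]  ; wrapperthread slot -> Thread*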

#ifdef AMD64

// This is the language-specific handler for exceptions
// originating from dynamically generated code.
// We call the standard structured exception handler.
// We only expect continued execution, since we cannot unwind
// from generated code.
LONG HandleExceptionFromCodeCache(
  IN PEXCEPTION_RECORD ExceptionRecord,
  IN ULONG64 EstablisherFrame,
  IN OUT PCONTEXT ContextRecord,
  IN OUT PDISPATCHER_CONTEXT DispatcherContext) {
  EXCEPTION_POINTERS ep;
  LONG result;

  ep.ExceptionRecord = ExceptionRecord;
  ep.ContextRecord = ContextRecord;

  result = topLevelExceptionFilter(&ep);

  // We should only get a CONTINUE_EXECUTION from our handler,
  // since we don't have unwind information registered.

  guarantee( result == EXCEPTION_CONTINUE_EXECUTION,
             "Unexpected result from topLevelExceptionFilter");

  return(ExceptionContinueExecution);
}
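
// Note the two distinct constants in play above: topLevelExceptionFilter
// returns the filter-expression value EXCEPTION_CONTINUE_EXECUTION (-1),
// while a language-specific handler must return an EXCEPTION_DISPOSITION,
// here ExceptionContinueExecution (0); the guarantee bridges the two.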


// Structure containing the Windows Data Structures required
// to register our Code Cache exception handler.
// We put these in the CodeCache since the API requires that
// all addresses in these structures be relative to the Code
// area registered with RtlAddFunctionTable.
typedef struct {
  char ExceptionHandlerInstr[16];  // jmp HandleExceptionFromCodeCache
  RUNTIME_FUNCTION rt;
  UNWIND_INFO_EH_ONLY unw;
} DynamicCodeData, *pDynamicCodeData;
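
// Why the 16-byte jmp thunk lives inside the structure: UNWIND_INFO's
// ExceptionHandler field is a 32-bit offset from the base address passed
// to RtlAddFunctionTable, so the handler must be addressable within the
// registered code area. HandleExceptionFromCodeCache itself sits in the
// VM image, possibly out of 32-bit range of the code cache, so we register
// a nearby jmp to it instead (see register_code_area below).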

#endif // AMD64
//
// Register our CodeCache area with the OS so it will dispatch exceptions
// to our topLevelExceptionFilter when we take an exception in our
// dynamically generated code.
//
// Arguments:  low and high are the addresses of the full reserved
// codeCache area
//
bool os::register_code_area(char *low, char *high) {
#ifdef AMD64

  ResourceMark rm;

  pDynamicCodeData pDCD;
  PRUNTIME_FUNCTION prt;
  PUNWIND_INFO_EH_ONLY punwind;

  // If we are using Vectored Exceptions we don't need this registration
  if (UseVectoredExceptions) return true;

  BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
  CodeBuffer cb(blob);
  MacroAssembler* masm = new MacroAssembler(&cb);
  pDCD = (pDynamicCodeData) masm->pc();

  masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache));
  masm->flush();

  // Create an Unwind Structure specifying no unwind info
  // other than an Exception Handler
  punwind = &pDCD->unw;
  punwind->Version = 1;
  punwind->Flags = UNW_FLAG_EHANDLER;
  punwind->SizeOfProlog = 0;
  punwind->CountOfCodes = 0;
  punwind->FrameRegister = 0;
  punwind->FrameOffset = 0;
  punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
                              (char*)low;
  punwind->ExceptionData[0] = 0;

  // This structure describes the covered dynamic code area.
  // Addresses are relative to the beginning of the code cache area
  prt = &pDCD->rt;
  prt->BeginAddress = 0;
  prt->EndAddress = (ULONG)(high - low);
  prt->UnwindData = ((char *)punwind - low);

  guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
            "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");

#endif // AMD64
  return true;
}

void os::initialize_thread(Thread* thr) {
  // Nothing to do.
}

// Atomics and Stub Functions

typedef jint      xchg_func_t            (jint,     volatile jint*);
typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
typedef jint      add_func_t             (jint,     volatile jint*);
typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);
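
// Bootstrap pattern: each os::atomic_* function pointer initially targets
// one of the *_bootstrap routines below. On every call the bootstrap first
// tries to install the real generated stub from StubRoutines; if stubs
// have not been generated yet (only possible while the VM is still
// single-threaded during startup, as the asserts check), it falls back to
// a plain, non-atomic C++ implementation, which is safe with no other
// threads running.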

#ifdef AMD64

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
  // try to use the stub:
  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());

  if (func != NULL) {
    os::atomic_xchg_ptr_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  intptr_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}


jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
#endif // AMD64

jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
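
// atomic_cmpxchg_long_bootstrap sits outside the #ifdef AMD64 block above
// because 32-bit x86 needs a generated stub for this operation too: a
// 64-bit compare-and-exchange requires the lock cmpxchg8b instruction,
// so the plain C++ fallback is only usable during single-threaded startup
// on either platform.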

#ifdef AMD64

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
  // try to use the stub:
  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());

  if (func != NULL) {
    os::atomic_add_ptr_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;

#endif // AMD64

cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  CONTEXT* uc = (CONTEXT*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC((address)uc->REG_PC);
    if (ret_sp) *ret_sp = (intptr_t*)uc->REG_SP;
    if (ret_fp) *ret_fp = (intptr_t*)uc->REG_FP;
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// VC++ does not save the frame pointer on the stack in optimized builds.
// Frame-pointer omission can be turned off with /Oy-. If we really want
// to walk C frames, we can use the StackWalk() API.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}
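
// get_sender_for_C_frame assumes the conventional EBP/RBP chain: the saved
// caller frame pointer at *fp with the return address one word above it,
// which is what fr->link() and fr->sender_pc() read. With frame-pointer
// omission in effect that chain is broken and the walk yields garbage.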

#ifndef AMD64
// Returns an estimate of the current stack pointer. The result must be
// guaranteed to point into the calling thread's stack, and be no lower
// than the current stack pointer.
address os::current_stack_pointer() {
  int dummy;
  address sp = (address)&dummy;
  return sp;
}
#else
// Returns the current stack pointer. Accurate value needed for
// os::verify_stack_alignment().
address os::current_stack_pointer() {
  typedef address get_sp_func();
  get_sp_func* func = CAST_TO_FN_PTR(get_sp_func*,
                                     StubRoutines::x86::get_previous_sp_entry());
  return (*func)();
}
#endif
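
// On 32-bit x86 the address of a local variable is a good enough
// (over-)estimate of sp. On AMD64 the Microsoft compiler offers no inline
// assembly, so an exact value has to come from the generated
// get_previous_sp stub, which reports the stack pointer of its caller.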


#ifndef AMD64
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  return *frameptr;
}
#endif // !AMD64
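
// _get_previous_fp reads this routine's own EBP, which points at the saved
// caller EBP, and dereferences it once; the result is therefore the frame
// pointer of the caller, os::current_frame below. (MSVC does not inline
// functions containing __asm blocks, so the extra frame is reliably
// present.)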

frame os::current_frame() {

#ifdef AMD64
  // apparently _asm not supported on windows amd64
  typedef intptr_t*      get_fp_func           ();
  get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
                                     StubRoutines::x86::get_previous_fp_entry());
  if (func == NULL) return frame(NULL, NULL, NULL);
  intptr_t* fp = (*func)();
#else
  intptr_t* fp = _get_previous_fp();
#endif // AMD64

  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame(NULL, NULL, NULL);
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->Rax);
  st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
  st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
  st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->Rsp);
  st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
  st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
  st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
  st->cr();
  st->print(  "R8 =" INTPTR_FORMAT, uc->R8);
  st->print(", R9 =" INTPTR_FORMAT, uc->R9);
  st->print(", R10=" INTPTR_FORMAT, uc->R10);
  st->print(", R11=" INTPTR_FORMAT, uc->R11);
  st->cr();
  st->print(  "R12=" INTPTR_FORMAT, uc->R12);
  st->print(", R13=" INTPTR_FORMAT, uc->R13);
  st->print(", R14=" INTPTR_FORMAT, uc->R14);
  st->print(", R15=" INTPTR_FORMAT, uc->R15);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->Rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->Eax);
  st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
  st->print(", ECX=" INTPTR_FORMAT, uc->Ecx);
  st->print(", EDX=" INTPTR_FORMAT, uc->Edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->Esp);
  st->print(", EBP=" INTPTR_FORMAT, uc->Ebp);
  st->print(", ESI=" INTPTR_FORMAT, uc->Esi);
  st->print(", EDI=" INTPTR_FORMAT, uc->Edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->Eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)uc->REG_SP;
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point of an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = (address)uc->REG_PC;
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 32, pc + 32, sizeof(char));
  st->cr();
}


void os::print_register_info(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Register to memory mapping:");
  st->cr();

  // this is only for the "general purpose" registers

#ifdef AMD64
  st->print("RAX="); print_location(st, uc->Rax);
  st->print("RBX="); print_location(st, uc->Rbx);
  st->print("RCX="); print_location(st, uc->Rcx);
  st->print("RDX="); print_location(st, uc->Rdx);
  st->print("RSP="); print_location(st, uc->Rsp);
  st->print("RBP="); print_location(st, uc->Rbp);
  st->print("RSI="); print_location(st, uc->Rsi);
  st->print("RDI="); print_location(st, uc->Rdi);
  st->print("R8 ="); print_location(st, uc->R8);
  st->print("R9 ="); print_location(st, uc->R9);
  st->print("R10="); print_location(st, uc->R10);
  st->print("R11="); print_location(st, uc->R11);
  st->print("R12="); print_location(st, uc->R12);
  st->print("R13="); print_location(st, uc->R13);
  st->print("R14="); print_location(st, uc->R14);
  st->print("R15="); print_location(st, uc->R15);
#else
  st->print("EAX="); print_location(st, uc->Eax);
  st->print("EBX="); print_location(st, uc->Ebx);
  st->print("ECX="); print_location(st, uc->Ecx);
  st->print("EDX="); print_location(st, uc->Edx);
  st->print("ESP="); print_location(st, uc->Esp);
  st->print("EBP="); print_location(st, uc->Ebp);
  st->print("ESI="); print_location(st, uc->Esi);
  st->print("EDI="); print_location(st, uc->Edi);
#endif

  st->cr();
}

extern "C" int SafeFetch32 (int * adr, int Err) {
   int rv = Err;
   __try {
       rv = *((volatile int *) adr);
   } __except(EXCEPTION_EXECUTE_HANDLER) {
   }
   return rv;
}

extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t Err) {
   intptr_t rv = Err;
   __try {
       rv = *((volatile intptr_t *) adr);
   } __except(EXCEPTION_EXECUTE_HANDLER) {
   }
   return rv;
}

extern "C" int SpinPause () {
#ifdef AMD64
   return 0;
#else
   // pause == rep:nop
   // On systems that don't support pause, a rep:nop
   // is executed as a nop. The rep: prefix is ignored.
   _asm {
      pause;
   };
   return 1;
#endif // AMD64
}
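
// The return value reports whether a real pause was issued: 1 on 32-bit
// x86, 0 on AMD64 where inline assembly is unavailable. Presumably spin
// loops can use this to tell whether SpinPause provides any backoff at
// all.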


void os::setup_fpu() {
#ifndef AMD64
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}
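
// fldcw loads the "standard" x87 control word supplied by StubRoutines
// (presumably round-to-nearest, 53-bit precision, exceptions masked) so
// that x87 arithmetic matches Java floating-point semantics. On AMD64 this
// is unnecessary because floating-point math uses SSE rather than the x87
// stack.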

#ifndef PRODUCT
void os::verify_stack_alignment() {
#ifdef AMD64
  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
#endif
}
#endif