/*
 * Copyright 1999-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// do not include precompiled header file
# include "incls/_os_windows_x86.cpp.incl"
# include "unwind_windows_x86.hpp"
#undef REG_SP
#undef REG_FP
#undef REG_PC
#ifdef AMD64
#define REG_SP Rsp
#define REG_FP Rbp
#define REG_PC Rip
#else
#define REG_SP Esp
#define REG_FP Ebp
#define REG_PC Eip
#endif // AMD64

extern LONG WINAPI topLevelExceptionFilter(_EXCEPTION_POINTERS* );

// Install a win32 structured exception handler around thread.
void os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread) {
  __try {

#ifndef AMD64
    // We store the current thread in this wrapperthread location
    // and determine how far away this address is from the structured
    // exception pointer that FS:[0] points to.  This get_thread
    // code can then get the thread pointer via FS.
    //
    // Warning:  This routine must NEVER be inlined since we'd end up with
    //           multiple offsets.
    //
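    // Illustrative sketch (not the generated code itself), assuming the
    // wrapper's registration record is the one FS:[0] points at:
    //   mov eax, fs:[0]                    ; current SEH registration record
    //   mov eax, [eax + thread_ptr_offset] ; Thread* stored by this wrapper
    // The actual get_thread code is emitted elsewhere by the assembler.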
    volatile Thread* wrapperthread = thread;

    if ( ThreadLocalStorage::get_thread_ptr_offset() == 0 ) {
      int thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov thread_ptr_offset, eax
      };
      ThreadLocalStorage::set_thread_ptr_offset(thread_ptr_offset);
    }
#ifdef ASSERT
    // Verify that the offset hasn't changed since we initially captured
    // it. This might happen if we accidentally ended up with an
    // inlined version of this routine.
    else {
      int test_thread_ptr_offset;
      __asm {
        lea eax, dword ptr wrapperthread;
        sub eax, dword ptr FS:[0H];
        mov test_thread_ptr_offset, eax
      };
      assert(test_thread_ptr_offset == ThreadLocalStorage::get_thread_ptr_offset(),
             "thread pointer offset from SEH changed");
    }
#endif // ASSERT
#endif // !AMD64

    f(value, method, args, thread);
  } __except(topLevelExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
      // Nothing to do.
  }
}

#ifdef AMD64

// This is the language-specific handler for exceptions
// originating from dynamically generated code.
// We call the standard structured exception handler.
// We only expect Continued Execution, since we cannot unwind
// from generated code.
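// Note on return values: topLevelExceptionFilter returns a filter-expression
// value (EXCEPTION_CONTINUE_EXECUTION), which this language-specific handler
// translates into the corresponding EXCEPTION_DISPOSITION value
// (ExceptionContinueExecution) that the OS dispatcher expects.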
LONG HandleExceptionFromCodeCache(
  IN PEXCEPTION_RECORD ExceptionRecord,
  IN ULONG64 EstablisherFrame,
  IN OUT PCONTEXT ContextRecord,
  IN OUT PDISPATCHER_CONTEXT DispatcherContext) {
  EXCEPTION_POINTERS ep;
  LONG result;

  ep.ExceptionRecord = ExceptionRecord;
  ep.ContextRecord = ContextRecord;

  result = topLevelExceptionFilter(&ep);

  // We should only get a CONTINUE_EXECUTION from our handler,
  // since we don't have unwind information registered.

  guarantee( result == EXCEPTION_CONTINUE_EXECUTION,
             "Unexpected result from topLevelExceptionFilter");

  return(ExceptionContinueExecution);
}


// Structure containing the Windows data structures required
// to register our CodeCache exception handler.
// We put these in the CodeCache since the API requires that
// all addresses in these structures be relative to the code
// area registered with RtlAddFunctionTable.
typedef struct {
  char ExceptionHandlerInstr[16];  // jmp HandleExceptionFromCodeCache
  RUNTIME_FUNCTION rt;
  UNWIND_INFO_EH_ONLY unw;
} DynamicCodeData, *pDynamicCodeData;
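// The DynamicCodeData blob lives somewhere inside [low, high); every field
// the OS interprets as an address (UnwindData, ExceptionHandler,
// Begin/EndAddress) is stored as an offset from 'low', because
// RtlAddFunctionTable treats its BaseAddress argument as the image base
// for all RVAs in these structures.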

#endif // AMD64

//
// Register our CodeCache area with the OS so it will dispatch exceptions
// to our topLevelExceptionFilter when we take an exception in our
// dynamically generated code.
//
// Arguments:  low and high are the addresses of the full reserved
// CodeCache area
//
bool os::register_code_area(char *low, char *high) {
#ifdef AMD64

  ResourceMark rm;

  pDynamicCodeData pDCD;
  PRUNTIME_FUNCTION prt;
  PUNWIND_INFO_EH_ONLY punwind;

  // If we are using Vectored Exceptions we don't need this registration
  if (UseVectoredExceptions) return true;

  BufferBlob* b = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
  CodeBuffer cb(b->instructions_begin(), b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(&cb);
  pDCD = (pDynamicCodeData) masm->pc();

  masm->jump(ExternalAddress((address)&HandleExceptionFromCodeCache));
  masm->flush();

  // Create an unwind structure specifying no unwind info
  // other than an exception handler.
  punwind = &pDCD->unw;
  punwind->Version = 1;
  punwind->Flags = UNW_FLAG_EHANDLER;
  punwind->SizeOfProlog = 0;
  punwind->CountOfCodes = 0;
  punwind->FrameRegister = 0;
  punwind->FrameOffset = 0;
  punwind->ExceptionHandler = (char *)(&(pDCD->ExceptionHandlerInstr[0])) -
                              (char*)low;
  punwind->ExceptionData[0] = 0;

  // This structure describes the covered dynamic code area.
  // Addresses are relative to the beginning of the code cache area.
  prt = &pDCD->rt;
  prt->BeginAddress = 0;
  prt->EndAddress = (ULONG)(high - low);
  prt->UnwindData = ((char *)punwind - low);

  guarantee(RtlAddFunctionTable(prt, 1, (ULONGLONG)low),
            "Failed to register Dynamic Code Exception Handler with RtlAddFunctionTable");

#endif // AMD64
  return true;
}

void os::initialize_thread() {
  // Nothing to do.
}

// Atomics and Stub Functions

typedef jint      xchg_func_t            (jint,     volatile jint*);
typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
typedef jint      add_func_t             (jint,     volatile jint*);
typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);

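// Bootstrap pattern: until the assembler stubs have been generated, the
// os::atomic_* function pointers below point at the *_bootstrap versions.
// The first call made after stub generation finds the stub entry, caches it
// in the function pointer, and delegates to it; before that (single-threaded
// VM startup only, as the asserts check) a plain C++ update is sufficient.
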
#ifdef AMD64

jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
  // try to use the stub:
  xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

  if (func != NULL) {
    os::atomic_xchg_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

intptr_t os::atomic_xchg_ptr_bootstrap(intptr_t exchange_value, volatile intptr_t* dest) {
  // try to use the stub:
  xchg_ptr_func_t* func = CAST_TO_FN_PTR(xchg_ptr_func_t*, StubRoutines::atomic_xchg_ptr_entry());

  if (func != NULL) {
    os::atomic_xchg_ptr_func = func;
    return (*func)(exchange_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  intptr_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}


jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
  // try to use the stub:
  cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jint old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}
#endif // AMD64

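// Note: unlike the other atomics, the 64-bit cmpxchg needs a stub even on
// 32-bit x86 (where it is typically implemented with cmpxchg8b), so its
// bootstrap and function pointer live outside the AMD64-only blocks.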
jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
  // try to use the stub:
  cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

  if (func != NULL) {
    os::atomic_cmpxchg_long_func = func;
    return (*func)(exchange_value, dest, compare_value);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  jlong old_value = *dest;
  if (old_value == compare_value)
    *dest = exchange_value;
  return old_value;
}

#ifdef AMD64

jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
  // try to use the stub:
  add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

  if (func != NULL) {
    os::atomic_add_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
  // try to use the stub:
  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());

  if (func != NULL) {
    os::atomic_add_ptr_func = func;
    return (*func)(add_value, dest);
  }
  assert(Threads::number_of_threads() == 0, "for bootstrap only");

  return (*dest) += add_value;
}

xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;

#endif // AMD64

cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                    intptr_t** ret_sp, intptr_t** ret_fp) {

  ExtendedPC  epc;
  CONTEXT* uc = (CONTEXT*)ucVoid;

  if (uc != NULL) {
    epc = ExtendedPC((address)uc->REG_PC);
    if (ret_sp) *ret_sp = (intptr_t*)uc->REG_SP;
    if (ret_fp) *ret_fp = (intptr_t*)uc->REG_FP;
  } else {
    // construct empty ExtendedPC for return value checking
    epc = ExtendedPC(NULL);
    if (ret_sp) *ret_sp = (intptr_t *)NULL;
    if (ret_fp) *ret_fp = (intptr_t *)NULL;
  }

  return epc;
}

frame os::fetch_frame_from_context(void* ucVoid) {
  intptr_t* sp;
  intptr_t* fp;
  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
  return frame(sp, fp, epc.pc());
}

// VC++ does not save the frame pointer on the stack in optimized builds
// (frame pointer omission; it can be disabled with /Oy-). If we really
// want to walk C frames, we can use the StackWalk() API.
frame os::get_sender_for_C_frame(frame* fr) {
  return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}


#ifndef AMD64
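// Returns the caller's frame pointer. This function is itself compiled with
// a conventional EBP-based frame, so [ebp] here holds our saved copy of the
// caller's EBP; dereferencing it yields the caller's frame pointer.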
intptr_t* _get_previous_fp() {
  intptr_t **frameptr;
  __asm {
    mov frameptr, ebp
  };
  return *frameptr;
}
#endif // !AMD64

frame os::current_frame() {

#ifdef AMD64
  // Inline assembly (_asm) is not supported by the Windows AMD64 compiler,
  // so we fetch the frame pointer through a generated stub instead.
  typedef intptr_t*      get_fp_func           ();
  get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
                                     StubRoutines::x86::get_previous_fp_entry());
  if (func == NULL) return frame(NULL, NULL, NULL);
  intptr_t* fp = (*func)();
#else
  intptr_t* fp = _get_previous_fp();
#endif // AMD64

  frame myframe((intptr_t*)os::current_stack_pointer(),
                (intptr_t*)fp,
                CAST_FROM_FN_PTR(address, os::current_frame));
  if (os::is_first_C_frame(&myframe)) {
    // stack is not walkable
    return frame(NULL, NULL, NULL);
  } else {
    return os::get_sender_for_C_frame(&myframe);
  }
}

void os::print_context(outputStream *st, void *context) {
  if (context == NULL) return;

  CONTEXT* uc = (CONTEXT*)context;

  st->print_cr("Registers:");
#ifdef AMD64
  st->print(  "RAX=" INTPTR_FORMAT, uc->Rax);
  st->print(", RBX=" INTPTR_FORMAT, uc->Rbx);
  st->print(", RCX=" INTPTR_FORMAT, uc->Rcx);
  st->print(", RDX=" INTPTR_FORMAT, uc->Rdx);
  st->cr();
  st->print(  "RSP=" INTPTR_FORMAT, uc->Rsp);
  st->print(", RBP=" INTPTR_FORMAT, uc->Rbp);
  st->print(", RSI=" INTPTR_FORMAT, uc->Rsi);
  st->print(", RDI=" INTPTR_FORMAT, uc->Rdi);
  st->cr();
  st->print(  "RIP=" INTPTR_FORMAT, uc->Rip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#else
  st->print(  "EAX=" INTPTR_FORMAT, uc->Eax);
  st->print(", EBX=" INTPTR_FORMAT, uc->Ebx);
  st->print(", ECX=" INTPTR_FORMAT, uc->Ecx);
  st->print(", EDX=" INTPTR_FORMAT, uc->Edx);
  st->cr();
  st->print(  "ESP=" INTPTR_FORMAT, uc->Esp);
  st->print(", EBP=" INTPTR_FORMAT, uc->Ebp);
  st->print(", ESI=" INTPTR_FORMAT, uc->Esi);
  st->print(", EDI=" INTPTR_FORMAT, uc->Edi);
  st->cr();
  st->print(  "EIP=" INTPTR_FORMAT, uc->Eip);
  st->print(", EFLAGS=" INTPTR_FORMAT, uc->EFlags);
#endif // AMD64
  st->cr();
  st->cr();

  intptr_t *sp = (intptr_t *)uc->REG_SP;
  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", sp);
  print_hex_dump(st, (address)sp, (address)(sp + 32), sizeof(intptr_t));
  st->cr();

  // Note: it may be unsafe to inspect memory near pc. For example, pc may
  // point to garbage if the entry point in an nmethod is corrupted. Leave
  // this at the end, and hope for the best.
  address pc = (address)uc->REG_PC;
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", pc);
  print_hex_dump(st, pc - 16, pc + 16, sizeof(char));
  st->cr();
}

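// SafeFetch32/SafeFetchN read a word from an address that may not be mapped
// (used, e.g., when probing suspect pointers during error reporting). If the
// load faults, the __except clause swallows the access violation and the
// caller-supplied Err value is returned instead.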
extern "C" int SafeFetch32 (int * adr, int Err) {
   int rv = Err ;
   __try {
       rv = *((volatile int *) adr) ;
   } __except(EXCEPTION_EXECUTE_HANDLER) {
   }
   return rv ;
}

extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t Err) {
   intptr_t rv = Err ;
   __try {
       rv = *((volatile intptr_t *) adr) ;
   } __except(EXCEPTION_EXECUTE_HANDLER) {
   }
   return rv ;
}

extern "C" int SpinPause () {
#ifdef AMD64
   return 0 ;
#else
   // pause == rep:nop
   // On systems that don't support pause, a rep:nop
   // is executed as a plain nop: the rep: prefix is ignored.
   _asm {
      pause ;
   };
   return 1 ;
#endif // AMD64
}


void os::setup_fpu() {
#ifndef AMD64
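  // fldcw loads the 16-bit x87 control word; fpu_cntrl_wrd_std() supplies
  // the VM's standard precision and rounding-control settings.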
  int fpu_cntrl_word = StubRoutines::fpu_cntrl_wrd_std();
  __asm fldcw fpu_cntrl_word;
#endif // !AMD64
}
462