os_windows.cpp revision 10373:6416cd3a77b3
1/*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
26#define _WIN32_WINNT 0x0600
27
28// no precompiled headers
29#include "classfile/classLoader.hpp"
30#include "classfile/systemDictionary.hpp"
31#include "classfile/vmSymbols.hpp"
32#include "code/icBuffer.hpp"
33#include "code/vtableStubs.hpp"
34#include "compiler/compileBroker.hpp"
35#include "compiler/disassembler.hpp"
36#include "interpreter/interpreter.hpp"
37#include "jvm_windows.h"
38#include "memory/allocation.inline.hpp"
39#include "memory/filemap.hpp"
40#include "mutex_windows.inline.hpp"
41#include "oops/oop.inline.hpp"
42#include "os_share_windows.hpp"
43#include "os_windows.inline.hpp"
44#include "prims/jniFastGetField.hpp"
45#include "prims/jvm.h"
46#include "prims/jvm_misc.hpp"
47#include "runtime/arguments.hpp"
48#include "runtime/atomic.inline.hpp"
49#include "runtime/extendedPC.hpp"
50#include "runtime/globals.hpp"
51#include "runtime/interfaceSupport.hpp"
52#include "runtime/java.hpp"
53#include "runtime/javaCalls.hpp"
54#include "runtime/mutexLocker.hpp"
55#include "runtime/objectMonitor.hpp"
56#include "runtime/orderAccess.inline.hpp"
57#include "runtime/osThread.hpp"
58#include "runtime/perfMemory.hpp"
59#include "runtime/sharedRuntime.hpp"
60#include "runtime/statSampler.hpp"
61#include "runtime/stubRoutines.hpp"
62#include "runtime/thread.inline.hpp"
63#include "runtime/threadCritical.hpp"
64#include "runtime/timer.hpp"
65#include "runtime/vm_version.hpp"
66#include "semaphore_windows.hpp"
67#include "services/attachListener.hpp"
68#include "services/memTracker.hpp"
69#include "services/runtimeService.hpp"
70#include "utilities/decoder.hpp"
71#include "utilities/defaultStream.hpp"
72#include "utilities/events.hpp"
73#include "utilities/growableArray.hpp"
74#include "utilities/vmError.hpp"
75
76#ifdef _DEBUG
77#include <crtdbg.h>
78#endif
79
80
81#include <windows.h>
82#include <sys/types.h>
83#include <sys/stat.h>
84#include <sys/timeb.h>
85#include <objidl.h>
86#include <shlobj.h>
87
88#include <malloc.h>
89#include <signal.h>
90#include <direct.h>
91#include <errno.h>
92#include <fcntl.h>
93#include <io.h>
94#include <process.h>              // For _beginthreadex(), _endthreadex()
95#include <imagehlp.h>             // For os::dll_address_to_function_name
96// for enumerating dll libraries
97#include <vdmdbg.h>
98
99// for timer info max values which include all bits
100#define ALL_64_BITS CONST64(-1)
101
102// For DLL loading/load error detection
103// Values of PE COFF
104#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
105#define IMAGE_FILE_SIGNATURE_LENGTH 4
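
// For reference (a sketch of the PE layout these constants describe; see
// os::dll_load below, which performs exactly these reads): the 4-byte value at
// file offset 0x3c of a PE image is the offset of the "PE\0\0" signature, and
// the 2-byte machine code of the COFF file header follows immediately after
// that 4-byte signature. Illustrative only:
//
//   uint32_t sig_off;   // read 4 bytes at IMAGE_FILE_PTR_TO_SIGNATURE (0x3c)
//   uint16_t machine;   // read 2 bytes at sig_off + IMAGE_FILE_SIGNATURE_LENGTH
//   // machine == IMAGE_FILE_MACHINE_AMD64 for an x64 DLL, for example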
106
107static HANDLE main_process;
108static HANDLE main_thread;
109static int    main_thread_id;
110
111static FILETIME process_creation_time;
112static FILETIME process_exit_time;
113static FILETIME process_user_time;
114static FILETIME process_kernel_time;
115
116#ifdef _M_IA64
117  #define __CPU__ ia64
118#else
119  #ifdef _M_AMD64
120    #define __CPU__ amd64
121  #else
122    #define __CPU__ i486
123  #endif
124#endif
125
126// save DLL module handle, used by GetModuleFileName
127
128HINSTANCE vm_lib_handle;
129
130BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
131  switch (reason) {
132  case DLL_PROCESS_ATTACH:
133    vm_lib_handle = hinst;
134    if (ForceTimeHighResolution) {
135      timeBeginPeriod(1L);
136    }
137    break;
138  case DLL_PROCESS_DETACH:
139    if (ForceTimeHighResolution) {
140      timeEndPeriod(1L);
141    }
142    break;
143  default:
144    break;
145  }
146  return true;
147}
148
149static inline double fileTimeAsDouble(FILETIME* time) {
150  const double high  = (double) ((unsigned int) ~0);
151  const double split = 10000000.0;
152  double result = (time->dwLowDateTime / split) +
153                   time->dwHighDateTime * (high/split);
154  return result;
155}
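
// A FILETIME is a 64-bit count of 100-nanosecond intervals split across two
// 32-bit halves. A minimal sketch of the same conversion written with
// ULARGE_INTEGER (illustrative only, not part of the original code):
//
//   ULARGE_INTEGER ft;
//   ft.LowPart  = time->dwLowDateTime;
//   ft.HighPart = time->dwHighDateTime;
//   double seconds = (double)ft.QuadPart / 10000000.0;  // 1e7 ticks per second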
156
157// Implementation of os
158
159bool os::unsetenv(const char* name) {
160  assert(name != NULL, "Null pointer");
161  return (SetEnvironmentVariable(name, NULL) == TRUE);
162}
163
164// No setuid programs under Windows.
165bool os::have_special_privileges() {
166  return false;
167}
168
169
170// This method is a periodic task used to check for misbehaving JNI applications
171// under CheckJNI; we can add any periodic checks here.
172// On Windows it currently does nothing.
173void os::run_periodic_checks() {
174  return;
175}
176
177// previous UnhandledExceptionFilter, if there is one
178static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
179
180LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
181
182void os::init_system_properties_values() {
183  // sysclasspath, java_home, dll_dir
184  {
185    char *home_path;
186    char *dll_path;
187    char *pslash;
188    char *bin = "\\bin";
189    char home_dir[MAX_PATH + 1];
190    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
191
192    if (alt_home_dir != NULL)  {
193      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
194      home_dir[MAX_PATH] = '\0';
195    } else {
196      os::jvm_path(home_dir, sizeof(home_dir));
197      // Found the full path to jvm.dll.
198      // Now cut the path to <java_home>/jre if we can.
199      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
200      pslash = strrchr(home_dir, '\\');
201      if (pslash != NULL) {
202        *pslash = '\0';                   // get rid of \{client|server}
203        pslash = strrchr(home_dir, '\\');
204        if (pslash != NULL) {
205          *pslash = '\0';                 // get rid of \bin
206        }
207      }
208    }
209
210    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
211    if (home_path == NULL) {
212      return;
213    }
214    strcpy(home_path, home_dir);
215    Arguments::set_java_home(home_path);
216    FREE_C_HEAP_ARRAY(char, home_path);
217
218    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
219                                mtInternal);
220    if (dll_path == NULL) {
221      return;
222    }
223    strcpy(dll_path, home_dir);
224    strcat(dll_path, bin);
225    Arguments::set_dll_dir(dll_path);
226    FREE_C_HEAP_ARRAY(char, dll_path);
227
228    if (!set_boot_path('\\', ';')) {
229      return;
230    }
231  }
232
233// library_path
234#define EXT_DIR "\\lib\\ext"
235#define BIN_DIR "\\bin"
236#define PACKAGE_DIR "\\Sun\\Java"
237  {
238    // Win32 library search order (See the documentation for LoadLibrary):
239    //
240    // 1. The directory from which application is loaded.
241    // 2. The system wide Java Extensions directory (Java only)
242    // 3. System directory (GetSystemDirectory)
243    // 4. Windows directory (GetWindowsDirectory)
244    // 5. The PATH environment variable
245    // 6. The current directory
246
247    char *library_path;
248    char tmp[MAX_PATH];
249    char *path_str = ::getenv("PATH");
250
251    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
252                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
253
254    library_path[0] = '\0';
255
256    GetModuleFileName(NULL, tmp, sizeof(tmp));
257    *(strrchr(tmp, '\\')) = '\0';
258    strcat(library_path, tmp);
259
260    GetWindowsDirectory(tmp, sizeof(tmp));
261    strcat(library_path, ";");
262    strcat(library_path, tmp);
263    strcat(library_path, PACKAGE_DIR BIN_DIR);
264
265    GetSystemDirectory(tmp, sizeof(tmp));
266    strcat(library_path, ";");
267    strcat(library_path, tmp);
268
269    GetWindowsDirectory(tmp, sizeof(tmp));
270    strcat(library_path, ";");
271    strcat(library_path, tmp);
272
273    if (path_str) {
274      strcat(library_path, ";");
275      strcat(library_path, path_str);
276    }
277
278    strcat(library_path, ";.");
279
280    Arguments::set_library_path(library_path);
281    FREE_C_HEAP_ARRAY(char, library_path);
282  }
283
284  // Default extensions directory
285  {
286    char path[MAX_PATH];
287    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
288    GetWindowsDirectory(path, MAX_PATH);
289    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
290            path, PACKAGE_DIR, EXT_DIR);
291    Arguments::set_ext_dirs(buf);
292  }
293  #undef EXT_DIR
294  #undef BIN_DIR
295  #undef PACKAGE_DIR
296
297#ifndef _WIN64
298  // set our UnhandledExceptionFilter and save any previous one
299  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
300#endif
301
302  // Done
303  return;
304}
305
306void os::breakpoint() {
307  DebugBreak();
308}
309
310// Invoked from the BREAKPOINT Macro
311extern "C" void breakpoint() {
312  os::breakpoint();
313}
314
315// The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
316// So far, this method is only used by Native Memory Tracking, which is
317// only supported on Windows XP or later.
318//
319int os::get_native_stack(address* stack, int frames, int toSkip) {
320#ifdef _NMT_NOINLINE_
321  toSkip++;
322#endif
323  int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
324  for (int index = captured; index < frames; index ++) {
325    stack[index] = NULL;
326  }
327  return captured;
328}
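
// A minimal usage sketch for os::get_native_stack (hypothetical call site,
// illustrative only):
//
//   address frames[16];
//   int n = os::get_native_stack(frames, 16, 0 /* toSkip */);
//   // frames[0..n-1] now hold return addresses; the remaining slots are NULL.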
329
330
331// os::current_stack_base()
332//
333//   Returns the base of the stack, which is the stack's
334//   starting address.  This function must be called
335//   while running on the stack of the thread being queried.
336
337address os::current_stack_base() {
338  MEMORY_BASIC_INFORMATION minfo;
339  address stack_bottom;
340  size_t stack_size;
341
342  VirtualQuery(&minfo, &minfo, sizeof(minfo));
343  stack_bottom =  (address)minfo.AllocationBase;
344  stack_size = minfo.RegionSize;
345
346  // Add up the sizes of all the regions with the same
347  // AllocationBase.
348  while (1) {
349    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
350    if (stack_bottom == (address)minfo.AllocationBase) {
351      stack_size += minfo.RegionSize;
352    } else {
353      break;
354    }
355  }
356
357#ifdef _M_IA64
358  // IA64 has memory and register stacks
359  //
360  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
361  // at thread creation (1MB backing store growing upwards, 1MB memory stack
362  // growing downwards, 2MB summed up)
363  //
364  // ...
365  // ------- top of stack (high address) -----
366  // |
367  // |      1MB
368  // |      Backing Store (Register Stack)
369  // |
370  // |         / \
371  // |          |
372  // |          |
373  // |          |
374  // ------------------------ stack base -----
375  // |      1MB
376  // |      Memory Stack
377  // |
378  // |          |
379  // |          |
380  // |          |
381  // |         \ /
382  // |
383  // ----- bottom of stack (low address) -----
384  // ...
385
386  stack_size = stack_size / 2;
387#endif
388  return stack_bottom + stack_size;
389}
390
391size_t os::current_stack_size() {
392  size_t sz;
393  MEMORY_BASIC_INFORMATION minfo;
394  VirtualQuery(&minfo, &minfo, sizeof(minfo));
395  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
396  return sz;
397}
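
// The two functions above describe the stack as the half-open address range
// [base - size, base). A minimal sketch estimating the remaining headroom from
// an approximate current stack pointer (hypothetical helper, illustrative only):
//
//   address base = os::current_stack_base();
//   size_t  size = os::current_stack_size();
//   address sp   = (address)&base;               // rough current stack pointer
//   size_t  headroom = (size_t)(sp - (base - size));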
398
399struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
400  const struct tm* time_struct_ptr = localtime(clock);
401  if (time_struct_ptr != NULL) {
402    *res = *time_struct_ptr;
403    return res;
404  }
405  return NULL;
406}
407
408LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
409
410// Thread start routine for all new Java threads
411static unsigned __stdcall java_start(Thread* thread) {
412  // Try to randomize the cache line index of hot stack frames.
413  // This helps when threads with the same stack traces evict each other's
414  // cache lines. The threads can be either from the same JVM instance, or
415  // from different JVM instances. The benefit is especially noticeable on
416  // processors with hyperthreading technology.
417  static int counter = 0;
418  int pid = os::current_process_id();
419  _alloca(((pid ^ counter++) & 7) * 128);
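  // Note: (pid ^ counter) & 7 picks one of 8 slots and each slot is 128 bytes,
  // so successive threads start their hot frames at offsets 0..896 bytes apart.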
420
421  thread->initialize_thread_current();
422
423  OSThread* osthr = thread->osthread();
424  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
425
426  if (UseNUMA) {
427    int lgrp_id = os::numa_get_group_id();
428    if (lgrp_id != -1) {
429      thread->set_lgrp_id(lgrp_id);
430    }
431  }
432
433  // Diagnostic code to investigate JDK-6573254
434  int res = 30115;  // non-java thread
435  if (thread->is_Java_thread()) {
436    res = 20115;    // java thread
437  }
438
439  // Install a win32 structured exception handler around every thread created
440  // by the VM, so the VM can generate an error dump when an exception occurs
441  // in a non-Java thread (e.g. the VM thread).
442  __try {
443    thread->run();
444  } __except(topLevelExceptionFilter(
445                                     (_EXCEPTION_POINTERS*)_exception_info())) {
446    // Nothing to do.
447  }
448
449  // One less thread is executing
450  // When the VMThread gets here, the main thread may have already exited
451  // which frees the CodeHeap containing the Atomic::add code
452  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
453    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
454  }
455
456  // Thread must not return from exit_process_or_thread(), but if it does,
457  // let it proceed to exit normally
458  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
459}
460
461static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
462                                  int thread_id) {
463  // Allocate the OSThread object
464  OSThread* osthread = new OSThread(NULL, NULL);
465  if (osthread == NULL) return NULL;
466
467  // Initialize support for Java interrupts
468  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
469  if (interrupt_event == NULL) {
470    delete osthread;
471    return NULL;
472  }
473  osthread->set_interrupt_event(interrupt_event);
474
475  // Store info on the Win32 thread into the OSThread
476  osthread->set_thread_handle(thread_handle);
477  osthread->set_thread_id(thread_id);
478
479  if (UseNUMA) {
480    int lgrp_id = os::numa_get_group_id();
481    if (lgrp_id != -1) {
482      thread->set_lgrp_id(lgrp_id);
483    }
484  }
485
486  // Initial thread state is INITIALIZED, not SUSPENDED
487  osthread->set_state(INITIALIZED);
488
489  return osthread;
490}
491
492
493bool os::create_attached_thread(JavaThread* thread) {
494#ifdef ASSERT
495  thread->verify_not_published();
496#endif
497  HANDLE thread_h;
498  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
499                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
500    fatal("DuplicateHandle failed\n");
501  }
502  OSThread* osthread = create_os_thread(thread, thread_h,
503                                        (int)current_thread_id());
504  if (osthread == NULL) {
505    return false;
506  }
507
508  // Initial thread state is RUNNABLE
509  osthread->set_state(RUNNABLE);
510
511  thread->set_osthread(osthread);
512  return true;
513}
514
515bool os::create_main_thread(JavaThread* thread) {
516#ifdef ASSERT
517  thread->verify_not_published();
518#endif
519  if (_starting_thread == NULL) {
520    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
521    if (_starting_thread == NULL) {
522      return false;
523    }
524  }
525
526  // The primordial thread is runnable from the start.
527  _starting_thread->set_state(RUNNABLE);
528
529  thread->set_osthread(_starting_thread);
530  return true;
531}
532
533// Allocate and initialize a new OSThread
534bool os::create_thread(Thread* thread, ThreadType thr_type,
535                       size_t stack_size) {
536  unsigned thread_id;
537
538  // Allocate the OSThread object
539  OSThread* osthread = new OSThread(NULL, NULL);
540  if (osthread == NULL) {
541    return false;
542  }
543
544  // Initialize support for Java interrupts
545  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
546  if (interrupt_event == NULL) {
547    delete osthread;
548    return false;
549  }
550  osthread->set_interrupt_event(interrupt_event);
551  osthread->set_interrupted(false);
552
553  thread->set_osthread(osthread);
554
555  if (stack_size == 0) {
556    switch (thr_type) {
557    case os::java_thread:
558      // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
559      if (JavaThread::stack_size_at_create() > 0) {
560        stack_size = JavaThread::stack_size_at_create();
561      }
562      break;
563    case os::compiler_thread:
564      if (CompilerThreadStackSize > 0) {
565        stack_size = (size_t)(CompilerThreadStackSize * K);
566        break;
567      } // else fall through:
568        // use VMThreadStackSize if CompilerThreadStackSize is not defined
569    case os::vm_thread:
570    case os::pgc_thread:
571    case os::cgc_thread:
572    case os::watcher_thread:
573      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
574      break;
575    }
576  }
577
578  // Create the Win32 thread
579  //
580  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
581  // does not specify the stack size. Instead, it specifies the size of the
582  // initially committed space. The stack size is determined by the
583  // PE header in the executable. If the committed "stack_size" is larger
584  // than the default value in the PE header, the stack is rounded up to the
585  // nearest multiple of 1MB. For example, if the launcher has a default
586  // stack size of 320k, specifying any size less than 320k does not
587  // affect the actual stack size at all; it only affects the initial
588  // commitment. On the other hand, specifying a 'stack_size' larger than the
589  // default value may cause a significant increase in memory usage, because
590  // not only is the stack space rounded up to a multiple of 1MB, but the
591  // entire space is also committed upfront.
592  //
593  // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
594  // for CreateThread() that makes it treat 'stack_size' as the stack size.
595  // However, we are not supposed to call CreateThread() directly according to
596  // the MSDN documentation, because the JVM uses the C runtime library. The
597  // good news is that the flag appears to work with _beginthreadex() as well.
598
599  HANDLE thread_handle =
600    (HANDLE)_beginthreadex(NULL,
601                           (unsigned)stack_size,
602                           (unsigned (__stdcall *)(void*)) java_start,
603                           thread,
604                           CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
605                           &thread_id);
606
607  if (thread_handle == NULL) {
608    // Need to clean up stuff we've allocated so far
609    CloseHandle(osthread->interrupt_event());
610    thread->set_osthread(NULL);
611    delete osthread;
612    return false;
613  }
614
615  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
616
617  // Store info on the Win32 thread into the OSThread
618  osthread->set_thread_handle(thread_handle);
619  osthread->set_thread_id(thread_id);
620
621  // Initial thread state is INITIALIZED, not SUSPENDED
622  osthread->set_state(INITIALIZED);
623
624  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
625  return true;
626}
627
628
629// Free Win32 resources related to the OSThread
630void os::free_thread(OSThread* osthread) {
631  assert(osthread != NULL, "osthread not set");
632  CloseHandle(osthread->thread_handle());
633  CloseHandle(osthread->interrupt_event());
634  delete osthread;
635}
636
637static jlong first_filetime;
638static jlong initial_performance_count;
639static jlong performance_frequency;
640
641
642jlong as_long(LARGE_INTEGER x) {
643  jlong result = 0; // initialization to avoid warning
644  set_high(&result, x.HighPart);
645  set_low(&result, x.LowPart);
646  return result;
647}
648
649
650jlong os::elapsed_counter() {
651  LARGE_INTEGER count;
652  QueryPerformanceCounter(&count);
653  return as_long(count) - initial_performance_count;
654}
655
656
657jlong os::elapsed_frequency() {
658  return performance_frequency;
659}
660
661
662julong os::available_memory() {
663  return win32::available_memory();
664}
665
666julong os::win32::available_memory() {
667  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
668  // value if total memory is larger than 4GB
669  MEMORYSTATUSEX ms;
670  ms.dwLength = sizeof(ms);
671  GlobalMemoryStatusEx(&ms);
672
673  return (julong)ms.ullAvailPhys;
674}
675
676julong os::physical_memory() {
677  return win32::physical_memory();
678}
679
680bool os::has_allocatable_memory_limit(julong* limit) {
681  MEMORYSTATUSEX ms;
682  ms.dwLength = sizeof(ms);
683  GlobalMemoryStatusEx(&ms);
684#ifdef _LP64
685  *limit = (julong)ms.ullAvailVirtual;
686  return true;
687#else
688  // Limit to 1400m because of the 2gb address space wall
689  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
690  return true;
691#endif
692}
693
694int os::active_processor_count() {
695  DWORD_PTR lpProcessAffinityMask = 0;
696  DWORD_PTR lpSystemAffinityMask = 0;
697  int proc_count = processor_count();
698  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
699      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
700    // The number of active processors is the number of bits set in the process affinity mask
701    int bitcount = 0;
702    while (lpProcessAffinityMask != 0) {
703      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
704      bitcount++;
705    }
706    return bitcount;
707  } else {
708    return proc_count;
709  }
710}
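
// The loop above clears the lowest set bit on each iteration
// (mask &= mask - 1), so the number of iterations equals the number of set
// bits. A worked example (illustrative only):
//
//   mask = 0b1101 -> 0b1100 -> 0b1000 -> 0b0000   (3 iterations, 3 processors)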
711
712void os::set_native_thread_name(const char *name) {
713
714  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
715  //
716  // Note that unfortunately this only works if the process
717  // is already attached to a debugger; debugger must observe
718  // the exception below to show the correct name.
719
720  const DWORD MS_VC_EXCEPTION = 0x406D1388;
721  struct {
722    DWORD dwType;     // must be 0x1000
723    LPCSTR szName;    // pointer to name (in user addr space)
724    DWORD dwThreadID; // thread ID (-1=caller thread)
725    DWORD dwFlags;    // reserved for future use, must be zero
726  } info;
727
728  info.dwType = 0x1000;
729  info.szName = name;
730  info.dwThreadID = -1;
731  info.dwFlags = 0;
732
733  __try {
734    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
735  } __except(EXCEPTION_CONTINUE_EXECUTION) {}
736}
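
// A minimal usage sketch (hypothetical thread name, illustrative only); the
// name only becomes visible in the thread list of an attached debugger:
//
//   os::set_native_thread_name("VM Periodic Task Thread");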
737
738bool os::distribute_processes(uint length, uint* distribution) {
739  // Not yet implemented.
740  return false;
741}
742
743bool os::bind_to_processor(uint processor_id) {
744  // Not yet implemented.
745  return false;
746}
747
748void os::win32::initialize_performance_counter() {
749  LARGE_INTEGER count;
750  QueryPerformanceFrequency(&count);
751  performance_frequency = as_long(count);
752  QueryPerformanceCounter(&count);
753  initial_performance_count = as_long(count);
754}
755
756
757double os::elapsedTime() {
758  return (double) elapsed_counter() / (double) elapsed_frequency();
759}
760
761
762// Windows format:
763//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
764// Java format:
765//   Java standards require the number of milliseconds since 1/1/1970
766
767// Constant offset - calculated using offset()
768static jlong  _offset   = 116444736000000000;
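// The constant can be checked by hand: 1601-01-01 to 1970-01-01 spans 369
// years containing 89 leap days (1700, 1800 and 1900 are not leap years),
// i.e. 369 * 365 + 89 = 134774 days, and
//   134774 * 86400 * 10000000 = 116444736000000000
// 100-nanosecond ticks, which matches _offset above.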
769// Fake time counter for reproducible results when debugging
770static jlong  fake_time = 0;
771
772#ifdef ASSERT
773// Just to be safe, recalculate the offset in debug mode
774static jlong _calculated_offset = 0;
775static int   _has_calculated_offset = 0;
776
777jlong offset() {
778  if (_has_calculated_offset) return _calculated_offset;
779  SYSTEMTIME java_origin;
780  java_origin.wYear          = 1970;
781  java_origin.wMonth         = 1;
782  java_origin.wDayOfWeek     = 0; // ignored
783  java_origin.wDay           = 1;
784  java_origin.wHour          = 0;
785  java_origin.wMinute        = 0;
786  java_origin.wSecond        = 0;
787  java_origin.wMilliseconds  = 0;
788  FILETIME jot;
789  if (!SystemTimeToFileTime(&java_origin, &jot)) {
790    fatal("Error = %d\nWindows error", GetLastError());
791  }
792  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
793  _has_calculated_offset = 1;
794  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
795  return _calculated_offset;
796}
797#else
798jlong offset() {
799  return _offset;
800}
801#endif
802
803jlong windows_to_java_time(FILETIME wt) {
804  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
805  return (a - offset()) / 10000;
806}
807
808// Returns time ticks in tenths of microseconds (100-nanosecond units)
809jlong windows_to_time_ticks(FILETIME wt) {
810  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
811  return (a - offset());
812}
813
814FILETIME java_to_windows_time(jlong l) {
815  jlong a = (l * 10000) + offset();
816  FILETIME result;
817  result.dwHighDateTime = high(a);
818  result.dwLowDateTime  = low(a);
819  return result;
820}
821
822bool os::supports_vtime() { return true; }
823bool os::enable_vtime() { return false; }
824bool os::vtime_enabled() { return false; }
825
826double os::elapsedVTime() {
827  FILETIME created;
828  FILETIME exited;
829  FILETIME kernel;
830  FILETIME user;
831  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
832    // the resolution of windows_to_java_time() should be sufficient (ms)
833    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
834  } else {
835    return elapsedTime();
836  }
837}
838
839jlong os::javaTimeMillis() {
840  if (UseFakeTimers) {
841    return fake_time++;
842  } else {
843    FILETIME wt;
844    GetSystemTimeAsFileTime(&wt);
845    return windows_to_java_time(wt);
846  }
847}
848
849void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
850  FILETIME wt;
851  GetSystemTimeAsFileTime(&wt);
852  jlong ticks = windows_to_time_ticks(wt); // 10th of micros
853  jlong secs = jlong(ticks / 10000000); // 10000 * 1000
854  seconds = secs;
855  nanos = jlong(ticks - (secs*10000000)) * 100;
856}
857
858jlong os::javaTimeNanos() {
859  LARGE_INTEGER current_count;
860  QueryPerformanceCounter(&current_count);
861  double current = as_long(current_count);
862  double freq = performance_frequency;
863  jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
864  return time;
865}
866
867void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
868  jlong freq = performance_frequency;
869  if (freq < NANOSECS_PER_SEC) {
870    // the performance counter is 64 bits and we will
871    // be multiplying it -- so no wrap in 64 bits
872    info_ptr->max_value = ALL_64_BITS;
873  } else if (freq > NANOSECS_PER_SEC) {
874    // use the max value the counter can reach to
875    // determine the max value which could be returned
876    julong max_counter = (julong)ALL_64_BITS;
877    info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
878  } else {
879    // the performance counter is 64 bits and we will
880    // be using it directly -- so no wrap in 64 bits
881    info_ptr->max_value = ALL_64_BITS;
882  }
883
884  // using a counter, so no skipping
885  info_ptr->may_skip_backward = false;
886  info_ptr->may_skip_forward = false;
887
888  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
889}
890
891char* os::local_time_string(char *buf, size_t buflen) {
892  SYSTEMTIME st;
893  GetLocalTime(&st);
894  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
895               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
896  return buf;
897}
898
899bool os::getTimesSecs(double* process_real_time,
900                      double* process_user_time,
901                      double* process_system_time) {
902  HANDLE h_process = GetCurrentProcess();
903  FILETIME create_time, exit_time, kernel_time, user_time;
904  BOOL result = GetProcessTimes(h_process,
905                                &create_time,
906                                &exit_time,
907                                &kernel_time,
908                                &user_time);
909  if (result != 0) {
910    FILETIME wt;
911    GetSystemTimeAsFileTime(&wt);
912    jlong rtc_millis = windows_to_java_time(wt);
913    jlong user_millis = windows_to_java_time(user_time);
914    jlong system_millis = windows_to_java_time(kernel_time);
915    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
916    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
917    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
918    return true;
919  } else {
920    return false;
921  }
922}
923
924void os::shutdown() {
925  // allow PerfMemory to attempt cleanup of any persistent resources
926  perfMemory_exit();
927
928  // flush buffered output, finish log files
929  ostream_abort();
930
931  // Check for abort hook
932  abort_hook_t abort_hook = Arguments::abort_hook();
933  if (abort_hook != NULL) {
934    abort_hook();
935  }
936}
937
938
939static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
940                                         PMINIDUMP_EXCEPTION_INFORMATION,
941                                         PMINIDUMP_USER_STREAM_INFORMATION,
942                                         PMINIDUMP_CALLBACK_INFORMATION);
943
944static HANDLE dumpFile = NULL;
945
946// Check if dump file can be created.
947void os::check_dump_limit(char* buffer, size_t buffsz) {
948  bool status = true;
949  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
950    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
951    status = false;
952  }
953
954#ifndef ASSERT
955  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
956    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
957    status = false;
958  }
959#endif
960
961  if (status) {
962    const char* cwd = get_current_directory(NULL, 0);
963    int pid = current_process_id();
964    if (cwd != NULL) {
965      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
966    } else {
967      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
968    }
969
970    if (dumpFile == NULL &&
971       (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
972                 == INVALID_HANDLE_VALUE) {
973      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
974      status = false;
975    }
976  }
977  VMError::record_coredump_status(buffer, status);
978}
979
980void os::abort(bool dump_core, void* siginfo, const void* context) {
981  HINSTANCE dbghelp;
982  EXCEPTION_POINTERS ep;
983  MINIDUMP_EXCEPTION_INFORMATION mei;
984  MINIDUMP_EXCEPTION_INFORMATION* pmei;
985
986  HANDLE hProcess = GetCurrentProcess();
987  DWORD processId = GetCurrentProcessId();
988  MINIDUMP_TYPE dumpType;
989
990  shutdown();
991  if (!dump_core || dumpFile == NULL) {
992    if (dumpFile != NULL) {
993      CloseHandle(dumpFile);
994    }
995    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
996  }
997
998  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
999
1000  if (dbghelp == NULL) {
1001    jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1002    CloseHandle(dumpFile);
1003    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1004  }
1005
1006  _MiniDumpWriteDump =
1007      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1008                                    PMINIDUMP_EXCEPTION_INFORMATION,
1009                                    PMINIDUMP_USER_STREAM_INFORMATION,
1010                                    PMINIDUMP_CALLBACK_INFORMATION),
1011                                    GetProcAddress(dbghelp,
1012                                    "MiniDumpWriteDump"));
1013
1014  if (_MiniDumpWriteDump == NULL) {
1015    jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1016    CloseHandle(dumpFile);
1017    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1018  }
1019
1020  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1021    MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1022
1023  if (siginfo != NULL && context != NULL) {
1024    ep.ContextRecord = (PCONTEXT) context;
1025    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1026
1027    mei.ThreadId = GetCurrentThreadId();
1028    mei.ExceptionPointers = &ep;
1029    pmei = &mei;
1030  } else {
1031    pmei = NULL;
1032  }
1033
1034  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1035  // the dump types we really want. If the first call fails, fall back to just using MiniDumpWithFullMemory.
1036  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1037      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1038    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1039  }
1040  CloseHandle(dumpFile);
1041  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1042}
1043
1044// Die immediately, no exit hook, no abort hook, no cleanup.
1045void os::die() {
1046  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1047}
1048
1049// Directory routines copied from src/win32/native/java/io/dirent_md.c
1050//  * dirent_md.c       1.15 00/02/02
1051//
1052// The declarations for DIR and struct dirent are in jvm_win32.h.
1053
1054// Caller must have already run dirname through JVM_NativePath, which removes
1055// duplicate slashes and converts all instances of '/' into '\\'.
1056
1057DIR * os::opendir(const char *dirname) {
1058  assert(dirname != NULL, "just checking");   // hotspot change
1059  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1060  DWORD fattr;                                // hotspot change
1061  char alt_dirname[4] = { 0, 0, 0, 0 };
1062
1063  if (dirp == 0) {
1064    errno = ENOMEM;
1065    return 0;
1066  }
1067
1068  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1069  // as a directory in FindFirstFile().  We detect this case here and
1070  // prepend the current drive name.
1071  //
1072  if (dirname[1] == '\0' && dirname[0] == '\\') {
1073    alt_dirname[0] = _getdrive() + 'A' - 1;
1074    alt_dirname[1] = ':';
1075    alt_dirname[2] = '\\';
1076    alt_dirname[3] = '\0';
1077    dirname = alt_dirname;
1078  }
1079
1080  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1081  if (dirp->path == 0) {
1082    free(dirp);
1083    errno = ENOMEM;
1084    return 0;
1085  }
1086  strcpy(dirp->path, dirname);
1087
1088  fattr = GetFileAttributes(dirp->path);
1089  if (fattr == 0xffffffff) {
1090    free(dirp->path);
1091    free(dirp);
1092    errno = ENOENT;
1093    return 0;
1094  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1095    free(dirp->path);
1096    free(dirp);
1097    errno = ENOTDIR;
1098    return 0;
1099  }
1100
1101  // Append "*.*", or possibly "\\*.*", to path
1102  if (dirp->path[1] == ':' &&
1103      (dirp->path[2] == '\0' ||
1104      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1105    // No '\\' needed for cases like "Z:" or "Z:\"
1106    strcat(dirp->path, "*.*");
1107  } else {
1108    strcat(dirp->path, "\\*.*");
1109  }
1110
1111  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1112  if (dirp->handle == INVALID_HANDLE_VALUE) {
1113    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1114      free(dirp->path);
1115      free(dirp);
1116      errno = EACCES;
1117      return 0;
1118    }
1119  }
1120  return dirp;
1121}
1122
1123// parameter dbuf unused on Windows
1124struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1125  assert(dirp != NULL, "just checking");      // hotspot change
1126  if (dirp->handle == INVALID_HANDLE_VALUE) {
1127    return 0;
1128  }
1129
1130  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1131
1132  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1133    if (GetLastError() == ERROR_INVALID_HANDLE) {
1134      errno = EBADF;
1135      return 0;
1136    }
1137    FindClose(dirp->handle);
1138    dirp->handle = INVALID_HANDLE_VALUE;
1139  }
1140
1141  return &dirp->dirent;
1142}
1143
1144int os::closedir(DIR *dirp) {
1145  assert(dirp != NULL, "just checking");      // hotspot change
1146  if (dirp->handle != INVALID_HANDLE_VALUE) {
1147    if (!FindClose(dirp->handle)) {
1148      errno = EBADF;
1149      return -1;
1150    }
1151    dirp->handle = INVALID_HANDLE_VALUE;
1152  }
1153  free(dirp->path);
1154  free(dirp);
1155  return 0;
1156}
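
// A minimal usage sketch of the three routines above (illustrative only; the
// hypothetical directory name must already be in native form, as noted above):
//
//   DIR* dir = os::opendir("C:\\some\\dir");
//   if (dir != NULL) {
//     struct dirent* entry;
//     while ((entry = os::readdir(dir, NULL)) != NULL) {
//       // entry->d_name holds the current file name
//     }
//     os::closedir(dir);
//   }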
1157
1158// This must be hard coded because it's the system's temporary
1159// directory, not the java application's temp directory (i.e. java.io.tmpdir).
1160const char* os::get_temp_directory() {
1161  static char path_buf[MAX_PATH];
1162  if (GetTempPath(MAX_PATH, path_buf) > 0) {
1163    return path_buf;
1164  } else {
1165    path_buf[0] = '\0';
1166    return path_buf;
1167  }
1168}
1169
1170static bool file_exists(const char* filename) {
1171  if (filename == NULL || strlen(filename) == 0) {
1172    return false;
1173  }
1174  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1175}
1176
1177bool os::dll_build_name(char *buffer, size_t buflen,
1178                        const char* pname, const char* fname) {
1179  bool retval = false;
1180  const size_t pnamelen = pname ? strlen(pname) : 0;
1181  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1182
1183  // Return error on buffer overflow.
1184  if (pnamelen + strlen(fname) + 10 > buflen) {
1185    return retval;
1186  }
1187
1188  if (pnamelen == 0) {
1189    jio_snprintf(buffer, buflen, "%s.dll", fname);
1190    retval = true;
1191  } else if (c == ':' || c == '\\') {
1192    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1193    retval = true;
1194  } else if (strchr(pname, *os::path_separator()) != NULL) {
1195    int n;
1196    char** pelements = split_path(pname, &n);
1197    if (pelements == NULL) {
1198      return false;
1199    }
1200    for (int i = 0; i < n; i++) {
1201      char* path = pelements[i];
1202      // Really shouldn't be NULL, but check can't hurt
1203      size_t plen = (path == NULL) ? 0 : strlen(path);
1204      if (plen == 0) {
1205        continue; // skip the empty path values
1206      }
1207      const char lastchar = path[plen - 1];
1208      if (lastchar == ':' || lastchar == '\\') {
1209        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1210      } else {
1211        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1212      }
1213      if (file_exists(buffer)) {
1214        retval = true;
1215        break;
1216      }
1217    }
1218    // release the storage
1219    for (int i = 0; i < n; i++) {
1220      if (pelements[i] != NULL) {
1221        FREE_C_HEAP_ARRAY(char, pelements[i]);
1222      }
1223    }
1224    if (pelements != NULL) {
1225      FREE_C_HEAP_ARRAY(char*, pelements);
1226    }
1227  } else {
1228    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1229    retval = true;
1230  }
1231  return retval;
1232}
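
// A minimal usage sketch for os::dll_build_name (hypothetical path,
// illustrative only):
//
//   char buf[MAX_PATH];
//   if (os::dll_build_name(buf, sizeof(buf), "C:\\jdk\\bin", "awt")) {
//     // buf now contains "C:\jdk\bin\awt.dll"
//   }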
1233
1234// Needs to be in the os-specific directory because Windows requires another
1235// header file, <direct.h>
1236const char* os::get_current_directory(char *buf, size_t buflen) {
1237  int n = static_cast<int>(buflen);
1238  if (buflen > INT_MAX)  n = INT_MAX;
1239  return _getcwd(buf, n);
1240}
1241
1242//-----------------------------------------------------------
1243// Helper functions for fatal error handler
1244#ifdef _WIN64
1245// Helper routine which returns true if the address is
1246// within the NTDLL address space.
1247//
1248static bool _addr_in_ntdll(address addr) {
1249  HMODULE hmod;
1250  MODULEINFO minfo;
1251
1252  hmod = GetModuleHandle("NTDLL.DLL");
1253  if (hmod == NULL) return false;
1254  if (!GetModuleInformation(GetCurrentProcess(), hmod,
1255                                          &minfo, sizeof(MODULEINFO))) {
1256    return false;
1257  }
1258
1259  if ((addr >= minfo.lpBaseOfDll) &&
1260      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1261    return true;
1262  } else {
1263    return false;
1264  }
1265}
1266#endif
1267
1268struct _modinfo {
1269  address addr;
1270  char*   full_path;   // point to a char buffer
1271  int     buflen;      // size of the buffer
1272  address base_addr;
1273};
1274
1275static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1276                                  address top_address, void * param) {
1277  struct _modinfo *pmod = (struct _modinfo *)param;
1278  if (!pmod) return -1;
1279
1280  if (base_addr   <= pmod->addr &&
1281      top_address > pmod->addr) {
1282    // if a buffer is provided, copy path name to the buffer
1283    if (pmod->full_path) {
1284      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1285    }
1286    pmod->base_addr = base_addr;
1287    return 1;
1288  }
1289  return 0;
1290}
1291
1292bool os::dll_address_to_library_name(address addr, char* buf,
1293                                     int buflen, int* offset) {
1294  // buf is not optional, but offset is optional
1295  assert(buf != NULL, "sanity check");
1296
1297// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1298//       return the full path to the DLL file; sometimes it returns the path
1299//       to the corresponding PDB file (debug info), and sometimes it only
1300//       returns a partial path, which makes life painful.
1301
1302  struct _modinfo mi;
1303  mi.addr      = addr;
1304  mi.full_path = buf;
1305  mi.buflen    = buflen;
1306  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1307    // buf already contains path name
1308    if (offset) *offset = addr - mi.base_addr;
1309    return true;
1310  }
1311
1312  buf[0] = '\0';
1313  if (offset) *offset = -1;
1314  return false;
1315}
1316
1317bool os::dll_address_to_function_name(address addr, char *buf,
1318                                      int buflen, int *offset,
1319                                      bool demangle) {
1320  // buf is not optional, but offset is optional
1321  assert(buf != NULL, "sanity check");
1322
1323  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1324    return true;
1325  }
1326  if (offset != NULL)  *offset  = -1;
1327  buf[0] = '\0';
1328  return false;
1329}
1330
1331// save the start and end address of jvm.dll into param[0] and param[1]
1332static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1333                           address top_address, void * param) {
1334  if (!param) return -1;
1335
1336  if (base_addr   <= (address)_locate_jvm_dll &&
1337      top_address > (address)_locate_jvm_dll) {
1338    ((address*)param)[0] = base_addr;
1339    ((address*)param)[1] = top_address;
1340    return 1;
1341  }
1342  return 0;
1343}
1344
1345address vm_lib_location[2];    // start and end address of jvm.dll
1346
1347// check if addr is inside jvm.dll
1348bool os::address_is_in_vm(address addr) {
1349  if (!vm_lib_location[0] || !vm_lib_location[1]) {
1350    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1351      assert(false, "Can't find jvm module.");
1352      return false;
1353    }
1354  }
1355
1356  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1357}
1358
1359// print module info; param is outputStream*
1360static int _print_module(const char* fname, address base_address,
1361                         address top_address, void* param) {
1362  if (!param) return -1;
1363
1364  outputStream* st = (outputStream*)param;
1365
1366  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1367  return 0;
1368}
1369
1370// Loads a .dll/.so and,
1371// in case of error, checks whether the .dll/.so was built for the
1372// same architecture as the one Hotspot is running on
1373void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1374  void * result = LoadLibrary(name);
1375  if (result != NULL) {
1376    return result;
1377  }
1378
1379  DWORD errcode = GetLastError();
1380  if (errcode == ERROR_MOD_NOT_FOUND) {
1381    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1382    ebuf[ebuflen - 1] = '\0';
1383    return NULL;
1384  }
1385
1386  // Parsing the dll below.
1387  // If we can read the dll-info and find that the dll was built
1388  // for an architecture other than the one Hotspot is running on,
1389  // then print "DLL was built for a different architecture" to the buffer;
1390  // else call os::lasterror to obtain the system error message.
1391
1392  // Read system error message into ebuf
1393  // It may or may not be overwritten below (in the for loop and just above)
1394  lasterror(ebuf, (size_t) ebuflen);
1395  ebuf[ebuflen - 1] = '\0';
1396  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1397  if (fd < 0) {
1398    return NULL;
1399  }
1400
1401  uint32_t signature_offset;
1402  uint16_t lib_arch = 0;
1403  bool failed_to_get_lib_arch =
1404    ( // Go to position 3c in the dll
1405     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1406     ||
1407     // Read location of signature
1408     (sizeof(signature_offset) !=
1409     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1410     ||
1411     // Go to COFF File Header in dll
1412     // that is located after "signature" (4 bytes long)
1413     (os::seek_to_file_offset(fd,
1414     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1415     ||
1416     // Read field that contains code of architecture
1417     // that dll was built for
1418     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1419    );
1420
1421  ::close(fd);
1422  if (failed_to_get_lib_arch) {
1423    // file i/o error - report os::lasterror(...) msg
1424    return NULL;
1425  }
1426
1427  typedef struct {
1428    uint16_t arch_code;
1429    char* arch_name;
1430  } arch_t;
1431
1432  static const arch_t arch_array[] = {
1433    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1434    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1435    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1436  };
1437#if   (defined _M_IA64)
1438  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1439#elif (defined _M_AMD64)
1440  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1441#elif (defined _M_IX86)
1442  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1443#else
1444  #error Method os::dll_load requires that one of following \
1445         is defined :_M_IA64,_M_AMD64 or _M_IX86
1446#endif
1447
1448
1449  // Obtain strings for the printf operation:
1450  // lib_arch_str shall contain a string describing what platform this .dll was built for,
1451  // running_arch_str shall contain a string describing what platform Hotspot was built for.
1452  char *running_arch_str = NULL, *lib_arch_str = NULL;
1453  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1454    if (lib_arch == arch_array[i].arch_code) {
1455      lib_arch_str = arch_array[i].arch_name;
1456    }
1457    if (running_arch == arch_array[i].arch_code) {
1458      running_arch_str = arch_array[i].arch_name;
1459    }
1460  }
1461
1462  assert(running_arch_str,
1463         "Didn't find running architecture code in arch_array");
1464
1465  // If the architecture is right
1466  // but some other error took place - report os::lasterror(...) msg
1467  if (lib_arch == running_arch) {
1468    return NULL;
1469  }
1470
1471  if (lib_arch_str != NULL) {
1472    ::_snprintf(ebuf, ebuflen - 1,
1473                "Can't load %s-bit .dll on a %s-bit platform",
1474                lib_arch_str, running_arch_str);
1475  } else {
1476    // don't know what architecture this dll was built for
1477    ::_snprintf(ebuf, ebuflen - 1,
1478                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1479                lib_arch, running_arch_str);
1480  }
1481
1482  return NULL;
1483}
1484
1485void os::print_dll_info(outputStream *st) {
1486  st->print_cr("Dynamic libraries:");
1487  get_loaded_modules_info(_print_module, (void *)st);
1488}
1489
1490int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1491  HANDLE   hProcess;
1492
1493# define MAX_NUM_MODULES 128
1494  HMODULE     modules[MAX_NUM_MODULES];
1495  static char filename[MAX_PATH];
1496  int         result = 0;
1497
1498  int pid = os::current_process_id();
1499  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1500                         FALSE, pid);
1501  if (hProcess == NULL) return 0;
1502
1503  DWORD size_needed;
1504  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1505    CloseHandle(hProcess);
1506    return 0;
1507  }
1508
1509  // number of modules that are currently loaded
1510  int num_modules = size_needed / sizeof(HMODULE);
1511
1512  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1513    // Get Full pathname:
1514    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1515      filename[0] = '\0';
1516    }
1517
1518    MODULEINFO modinfo;
1519    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1520      modinfo.lpBaseOfDll = NULL;
1521      modinfo.SizeOfImage = 0;
1522    }
1523
1524    // Invoke callback function
1525    result = callback(filename, (address)modinfo.lpBaseOfDll,
1526                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1527    if (result) break;
1528  }
1529
1530  CloseHandle(hProcess);
1531  return result;
1532}
1533
1534bool os::get_host_name(char* buf, size_t buflen) {
1535  DWORD size = (DWORD)buflen;
1536  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1537}
1538
1539void os::get_summary_os_info(char* buf, size_t buflen) {
1540  stringStream sst(buf, buflen);
1541  os::win32::print_windows_version(&sst);
1542  // chop off newline character
1543  char* nl = strchr(buf, '\n');
1544  if (nl != NULL) *nl = '\0';
1545}
1546
1547int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1548  int ret = vsnprintf(buf, len, fmt, args);
1549  // Get the correct buffer size if buf is too small
1550  if (ret < 0) {
1551    return _vscprintf(fmt, args);
1552  }
1553  return ret;
1554}
1555
1556void os::print_os_info_brief(outputStream* st) {
1557  os::print_os_info(st);
1558}
1559
1560void os::print_os_info(outputStream* st) {
1561#ifdef ASSERT
1562  char buffer[1024];
1563  st->print("HostName: ");
1564  if (get_host_name(buffer, sizeof(buffer))) {
1565    st->print("%s ", buffer);
1566  } else {
1567    st->print("N/A ");
1568  }
1569#endif
1570  st->print("OS:");
1571  os::win32::print_windows_version(st);
1572}
1573
1574void os::win32::print_windows_version(outputStream* st) {
1575  OSVERSIONINFOEX osvi;
1576  VS_FIXEDFILEINFO *file_info;
1577  TCHAR kernel32_path[MAX_PATH];
1578  UINT len, ret;
1579
1580  // Use the GetVersionEx information to see if we're on a server or
1581  // workstation edition of Windows. Starting with Windows 8.1 we can't
1582  // trust the OS version information returned by this API.
1583  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1584  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1585  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1586    st->print_cr("Call to GetVersionEx failed");
1587    return;
1588  }
1589  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1590
1591  // Get the full path to \Windows\System32\kernel32.dll and use that for
1592  // determining what version of Windows we're running on.
1593  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1594  ret = GetSystemDirectory(kernel32_path, len);
1595  if (ret == 0 || ret > len) {
1596    st->print_cr("Call to GetSystemDirectory failed");
1597    return;
1598  }
1599  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1600
1601  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1602  if (version_size == 0) {
1603    st->print_cr("Call to GetFileVersionInfoSize failed");
1604    return;
1605  }
1606
1607  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1608  if (version_info == NULL) {
1609    st->print_cr("Failed to allocate version_info");
1610    return;
1611  }
1612
1613  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1614    os::free(version_info);
1615    st->print_cr("Call to GetFileVersionInfo failed");
1616    return;
1617  }
1618
1619  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1620    os::free(version_info);
1621    st->print_cr("Call to VerQueryValue failed");
1622    return;
1623  }
1624
1625  int major_version = HIWORD(file_info->dwProductVersionMS);
1626  int minor_version = LOWORD(file_info->dwProductVersionMS);
1627  int build_number = HIWORD(file_info->dwProductVersionLS);
1628  int build_minor = LOWORD(file_info->dwProductVersionLS);
1629  int os_vers = major_version * 1000 + minor_version;
1630  os::free(version_info);
1631
1632  st->print(" Windows ");
1633  switch (os_vers) {
1634
1635  case 6000:
1636    if (is_workstation) {
1637      st->print("Vista");
1638    } else {
1639      st->print("Server 2008");
1640    }
1641    break;
1642
1643  case 6001:
1644    if (is_workstation) {
1645      st->print("7");
1646    } else {
1647      st->print("Server 2008 R2");
1648    }
1649    break;
1650
1651  case 6002:
1652    if (is_workstation) {
1653      st->print("8");
1654    } else {
1655      st->print("Server 2012");
1656    }
1657    break;
1658
1659  case 6003:
1660    if (is_workstation) {
1661      st->print("8.1");
1662    } else {
1663      st->print("Server 2012 R2");
1664    }
1665    break;
1666
1667  case 10000:
1668    if (is_workstation) {
1669      st->print("10");
1670    } else {
1671      st->print("Server 2016");
1672    }
1673    break;
1674
1675  default:
1676    // Unrecognized Windows version; print out its major and minor version numbers
1677    st->print("%d.%d", major_version, minor_version);
1678    break;
1679  }
1680
1681  // Retrieve SYSTEM_INFO from the GetNativeSystemInfo call so that we can
1682  // find out whether we are running on a 64-bit processor or not
1683  SYSTEM_INFO si;
1684  ZeroMemory(&si, sizeof(SYSTEM_INFO));
1685  GetNativeSystemInfo(&si);
1686  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1687    st->print(" , 64 bit");
1688  }
1689
1690  st->print(" Build %d", build_number);
1691  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1692  st->cr();
1693}
1694
1695void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1696  // Nothing to do for now.
1697}
1698
1699void os::get_summary_cpu_info(char* buf, size_t buflen) {
1700  HKEY key;
1701  DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1702               "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1703  if (status == ERROR_SUCCESS) {
1704    DWORD size = (DWORD)buflen;
1705    status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1706    if (status != ERROR_SUCCESS) {
1707      strncpy(buf, "## __CPU__", buflen);
1708    }
1709    RegCloseKey(key);
1710  } else {
1711    // Fall back to a generic CPU description.
1712    strncpy(buf, "## __CPU__", buflen);
1713  }
1714}
1715
1716void os::print_memory_info(outputStream* st) {
1717  st->print("Memory:");
1718  st->print(" %dk page", os::vm_page_size()>>10);
1719
1720  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
1721  // value if total memory is larger than 4GB.
1722  MEMORYSTATUSEX ms;
1723  ms.dwLength = sizeof(ms);
1724  GlobalMemoryStatusEx(&ms);
1725
1726  st->print(", physical %uk", os::physical_memory() >> 10);
1727  st->print("(%uk free)", os::available_memory() >> 10);
1728
1729  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1730  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1731  st->cr();
1732}
1733
1734void os::print_siginfo(outputStream *st, const void* siginfo) {
1735  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1736  st->print("siginfo:");
1737
1738  char tmp[64];
1739  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1740    strcpy(tmp, "EXCEPTION_??");
1741  }
1742  st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1743
1744  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1745       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1746       er->NumberParameters >= 2) {
1747    switch (er->ExceptionInformation[0]) {
1748    case 0: st->print(", reading address"); break;
1749    case 1: st->print(", writing address"); break;
1750    case 8: st->print(", data execution prevention violation at address"); break;
1751    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1752                       er->ExceptionInformation[0]);
1753    }
1754    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1755  } else {
1756    int num = er->NumberParameters;
1757    if (num > 0) {
1758      st->print(", ExceptionInformation=");
1759      for (int i = 0; i < num; i++) {
1760        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1761      }
1762    }
1763  }
1764  st->cr();
1765}
1766
1767void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1768  // do nothing
1769}
1770
1771static char saved_jvm_path[MAX_PATH] = {0};
1772
1773// Find the full path to the current module, jvm.dll
1774void os::jvm_path(char *buf, jint buflen) {
1775  // Error checking.
1776  if (buflen < MAX_PATH) {
1777    assert(false, "must use a large-enough buffer");
1778    buf[0] = '\0';
1779    return;
1780  }
1781  // Lazy resolve the path to current module.
1782  if (saved_jvm_path[0] != 0) {
1783    strcpy(buf, saved_jvm_path);
1784    return;
1785  }
1786
1787  buf[0] = '\0';
1788  if (Arguments::sun_java_launcher_is_altjvm()) {
1789    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1790    // for a JAVA_HOME environment variable and fix up the path so it
1791    // looks like jvm.dll is installed there (append a fake suffix
1792    // hotspot/jvm.dll).
1793    char* java_home_var = ::getenv("JAVA_HOME");
1794    if (java_home_var != NULL && java_home_var[0] != 0 &&
1795        strlen(java_home_var) < (size_t)buflen) {
1796      strncpy(buf, java_home_var, buflen);
1797
1798      // determine if this is a legacy image or modules image
1799      // modules image doesn't have "jre" subdirectory
1800      size_t len = strlen(buf);
1801      char* jrebin_p = buf + len;
1802      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1803      if (0 != _access(buf, 0)) {
1804        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1805      }
1806      len = strlen(buf);
1807      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1808    }
1809  }
1810
1811  if (buf[0] == '\0') {
1812    GetModuleFileName(vm_lib_handle, buf, buflen);
1813  }
1814  strncpy(saved_jvm_path, buf, MAX_PATH);
1815  saved_jvm_path[MAX_PATH - 1] = '\0';
1816}
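// Illustrative example (not part of the build): with an altjvm launcher and,
// say, JAVA_HOME=C:\jdk (a hypothetical path), the code above yields
// "C:\jdk\jre\bin\hotspot\jvm.dll" for a legacy image or
// "C:\jdk\bin\hotspot\jvm.dll" for a modules image (no "jre" subdirectory);
// otherwise the path comes from GetModuleFileName() on the loaded jvm.dll.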
1817
1818
1819void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1820#ifndef _WIN64
1821  st->print("_");
1822#endif
1823}
1824
1825
1826void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1827#ifndef _WIN64
1828  st->print("@%d", args_size  * sizeof(int));
1829#endif
1830}
1831
1832// This method is a copy of JDK's sysGetLastErrorString
1833// from src/windows/hpi/src/system_md.c
1834
1835size_t os::lasterror(char* buf, size_t len) {
1836  DWORD errval;
1837
1838  if ((errval = GetLastError()) != 0) {
1839    // DOS error
1840    size_t n = (size_t)FormatMessage(
1841                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1842                                     NULL,
1843                                     errval,
1844                                     0,
1845                                     buf,
1846                                     (DWORD)len,
1847                                     NULL);
1848    if (n > 3) {
1849      // Drop final '.', CR, LF
1850      if (buf[n - 1] == '\n') n--;
1851      if (buf[n - 1] == '\r') n--;
1852      if (buf[n - 1] == '.') n--;
1853      buf[n] = '\0';
1854    }
1855    return n;
1856  }
1857
1858  if (errno != 0) {
1859    // C runtime error that has no corresponding DOS error code
1860    const char* s = strerror(errno);
1861    size_t n = strlen(s);
1862    if (n >= len) n = len - 1;
1863    strncpy(buf, s, n);
1864    buf[n] = '\0';
1865    return n;
1866  }
1867
1868  return 0;
1869}
1870
1871int os::get_last_error() {
1872  DWORD error = GetLastError();
1873  if (error == 0) {
1874    error = errno;
1875  }
1876  return (int)error;
1877}
1878
1879WindowsSemaphore::WindowsSemaphore(uint value) {
1880  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1881
1882  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1883}
1884
1885WindowsSemaphore::~WindowsSemaphore() {
1886  ::CloseHandle(_semaphore);
1887}
1888
1889void WindowsSemaphore::signal(uint count) {
1890  if (count > 0) {
1891    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1892
1893    assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1894  }
1895}
1896
1897void WindowsSemaphore::wait() {
1898  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1899  assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1900  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1901}
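// Minimal usage sketch for the class above (illustrative only):
//
//   WindowsSemaphore sem(0);  // starts with no permits
//   // producer thread:  sem.signal(1);   // release one waiter
//   // consumer thread:  sem.wait();      // block until a permit is available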
1902
1903// sun.misc.Signal
1904// NOTE that this is a workaround for an apparent kernel bug where if
1905// a signal handler for SIGBREAK is installed then that signal handler
1906// takes priority over the console control handler for CTRL_CLOSE_EVENT.
1907// See bug 4416763.
1908static void (*sigbreakHandler)(int) = NULL;
1909
1910static void UserHandler(int sig, void *siginfo, void *context) {
1911  os::signal_notify(sig);
1912  // We need to reinstate the signal handler each time...
1913  os::signal(sig, (void*)UserHandler);
1914}
1915
1916void* os::user_handler() {
1917  return (void*) UserHandler;
1918}
1919
1920void* os::signal(int signal_number, void* handler) {
1921  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1922    void (*oldHandler)(int) = sigbreakHandler;
1923    sigbreakHandler = (void (*)(int)) handler;
1924    return (void*) oldHandler;
1925  } else {
1926    return (void*)::signal(signal_number, (void (*)(int))handler);
1927  }
1928}
1929
1930void os::signal_raise(int signal_number) {
1931  raise(signal_number);
1932}
1933
1934// The Win32 C runtime library maps all console control events other than ^C
1935// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
1936// logoff, and shutdown events.  We therefore install our own console handler
1937// that raises SIGTERM for the latter cases.
1938//
1939static BOOL WINAPI consoleHandler(DWORD event) {
1940  switch (event) {
1941  case CTRL_C_EVENT:
1942    if (is_error_reported()) {
1943      // Ctrl-C was pressed during error reporting, likely because the error
1944      // handler failed to abort. Let the VM die immediately.
1945      os::die();
1946    }
1947
1948    os::signal_raise(SIGINT);
1949    return TRUE;
1950    break;
1951  case CTRL_BREAK_EVENT:
1952    if (sigbreakHandler != NULL) {
1953      (*sigbreakHandler)(SIGBREAK);
1954    }
1955    return TRUE;
1956    break;
1957  case CTRL_LOGOFF_EVENT: {
1958    // Don't terminate the JVM if it is running in a non-interactive session,
1959    // such as a service process.
1960    USEROBJECTFLAGS flags;
1961    HANDLE handle = GetProcessWindowStation();
1962    if (handle != NULL &&
1963        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
1964        sizeof(USEROBJECTFLAGS), NULL)) {
1965      // If it is a non-interactive session, let the next handler deal
1966      // with it.
1967      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
1968        return FALSE;
1969      }
1970    }
1971  }
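  // Note: when we do not return FALSE above (interactive session, or the
  // window station query failed), execution deliberately falls through into
  // the CTRL_CLOSE_EVENT/CTRL_SHUTDOWN_EVENT cases below and raises SIGTERM.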
1972  case CTRL_CLOSE_EVENT:
1973  case CTRL_SHUTDOWN_EVENT:
1974    os::signal_raise(SIGTERM);
1975    return TRUE;
1976    break;
1977  default:
1978    break;
1979  }
1980  return FALSE;
1981}
1982
1983// The following code was moved from os.cpp to make this
1984// code platform specific, which it is by its very nature.
1985
1986// Return maximum OS signal used + 1 for internal use only
1987// Used as exit signal for signal_thread
1988int os::sigexitnum_pd() {
1989  return NSIG;
1990}
1991
1992// a counter for each possible signal value, including signal_thread exit signal
1993static volatile jint pending_signals[NSIG+1] = { 0 };
1994static HANDLE sig_sem = NULL;
1995
1996void os::signal_init_pd() {
1997  // Initialize signal structures
1998  memset((void*)pending_signals, 0, sizeof(pending_signals));
1999
2000  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2001
2002  // Programs embedding the VM do not want it to attempt to receive
2003  // events like CTRL_LOGOFF_EVENT, which are used to implement the
2004  // shutdown hooks mechanism introduced in 1.3.  For example, when
2005  // the VM is run as part of a Windows NT service (i.e., a servlet
2006  // engine in a web server), the correct behavior is for any console
2007  // control handler to return FALSE, not TRUE, because the OS's
2008  // "final" handler for such events allows the process to continue if
2009  // it is a service (while terminating it if it is not a service).
2010  // To make this behavior uniform and the mechanism simpler, we
2011  // completely disable the VM's usage of these console events if -Xrs
2012  // (=ReduceSignalUsage) is specified.  This means, for example, that
2013  // the CTRL-BREAK thread dump mechanism is also disabled in this
2014  // case.  See bugs 4323062, 4345157, and related bugs.
2015
2016  if (!ReduceSignalUsage) {
2017    // Add a CTRL-C handler
2018    SetConsoleCtrlHandler(consoleHandler, TRUE);
2019  }
2020}
2021
2022void os::signal_notify(int signal_number) {
2023  BOOL ret;
2024  if (sig_sem != NULL) {
2025    Atomic::inc(&pending_signals[signal_number]);
2026    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2027    assert(ret != 0, "ReleaseSemaphore() failed");
2028  }
2029}
2030
2031static int check_pending_signals(bool wait_for_signal) {
2032  DWORD ret;
2033  while (true) {
2034    for (int i = 0; i < NSIG + 1; i++) {
2035      jint n = pending_signals[i];
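      // Claim one pending occurrence of signal i with a lock-free decrement:
      // cmpxchg stores n-1 only if the slot still holds n and returns the
      // previous value, so a result equal to n means this thread won the claim.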
2036      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2037        return i;
2038      }
2039    }
2040    if (!wait_for_signal) {
2041      return -1;
2042    }
2043
2044    JavaThread *thread = JavaThread::current();
2045
2046    ThreadBlockInVM tbivm(thread);
2047
2048    bool threadIsSuspended;
2049    do {
2050      thread->set_suspend_equivalent();
2051      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2052      ret = ::WaitForSingleObject(sig_sem, INFINITE);
2053      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2054
2055      // were we externally suspended while we were waiting?
2056      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2057      if (threadIsSuspended) {
2058        // The semaphore has been incremented, but while we were waiting
2059        // another thread suspended us. We don't want to continue running
2060        // while suspended because that would surprise the thread that
2061        // suspended us.
2062        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2063        assert(ret != 0, "ReleaseSemaphore() failed");
2064
2065        thread->java_suspend_self();
2066      }
2067    } while (threadIsSuspended);
2068  }
2069}
2070
2071int os::signal_lookup() {
2072  return check_pending_signals(false);
2073}
2074
2075int os::signal_wait() {
2076  return check_pending_signals(true);
2077}
2078
2079// Implicit OS exception handling
2080
2081LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2082                      address handler) {
2083    JavaThread* thread = (JavaThread*) Thread::current_or_null();
2084  // Save pc in thread
2085#ifdef _M_IA64
2086  // Do not blow up if no thread info available.
2087  if (thread) {
2088    // Saving PRECISE pc (with slot information) in thread.
2089    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2090    // Convert precise PC into "Unix" format
2091    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2092    thread->set_saved_exception_pc((address)precise_pc);
2093  }
2094  // Set pc to handler
2095  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2096  // Clear out psr.ri (= Restart Instruction) in order to continue
2097  // at the beginning of the target bundle.
2098  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2099  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2100#else
2101  #ifdef _M_AMD64
2102  // Do not blow up if no thread info available.
2103  if (thread) {
2104    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2105  }
2106  // Set pc to handler
2107  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2108  #else
2109  // Do not blow up if no thread info available.
2110  if (thread) {
2111    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2112  }
2113  // Set pc to handler
2114  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2115  #endif
2116#endif
2117
2118  // Continue the execution
2119  return EXCEPTION_CONTINUE_EXECUTION;
2120}
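// In effect, Handle_Exception() records the faulting pc in the thread and
// rewrites the context's instruction pointer so that, when the OS resumes
// the faulting thread, it continues in the supplied handler rather than
// re-executing the faulting instruction.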
2121
2122
2123// Used for PostMortemDump
2124extern "C" void safepoints();
2125extern "C" void find(int x);
2126extern "C" void events();
2127
2128// According to Windows API documentation, an illegal instruction sequence should generate
2129// the 0xC000001C exception code. However, real-world experience shows that occasionally
2130// the execution of an illegal instruction can generate the exception code 0xC000001E. This
2131// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2132
2133#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2134
2135// From "Execution Protection in the Windows Operating System" draft 0.35
2136// Once a system header becomes available, the "real" define should be
2137// included or copied here.
2138#define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2139
2140// Handle NAT Bit consumption on IA64.
2141#ifdef _M_IA64
2142  #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2143#endif
2144
2145// Windows Vista/2008 heap corruption check
2146#define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2147
2148#define def_excpt(val) #val, val
2149
2150struct siglabel {
2151  char *name;
2152  int   number;
2153};
2154
2155// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2156// C++ compiler contain this error code. Because this is a compiler-generated
2157// error, the code is not listed in the Win32 API header files.
2158// The code is actually a cryptic mnemonic device, with the initial "E"
2159// standing for "exception" and the final 3 bytes (0x6D7363) representing the
2160// ASCII values of "msc".
2161
2162#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2163
2164
2165struct siglabel exceptlabels[] = {
2166    def_excpt(EXCEPTION_ACCESS_VIOLATION),
2167    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2168    def_excpt(EXCEPTION_BREAKPOINT),
2169    def_excpt(EXCEPTION_SINGLE_STEP),
2170    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2171    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2172    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2173    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2174    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2175    def_excpt(EXCEPTION_FLT_OVERFLOW),
2176    def_excpt(EXCEPTION_FLT_STACK_CHECK),
2177    def_excpt(EXCEPTION_FLT_UNDERFLOW),
2178    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2179    def_excpt(EXCEPTION_INT_OVERFLOW),
2180    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2181    def_excpt(EXCEPTION_IN_PAGE_ERROR),
2182    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2183    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2184    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2185    def_excpt(EXCEPTION_STACK_OVERFLOW),
2186    def_excpt(EXCEPTION_INVALID_DISPOSITION),
2187    def_excpt(EXCEPTION_GUARD_PAGE),
2188    def_excpt(EXCEPTION_INVALID_HANDLE),
2189    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2190    def_excpt(EXCEPTION_HEAP_CORRUPTION),
2191#ifdef _M_IA64
2192    def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
2193#endif
2194    NULL, 0
2195};
2196
2197const char* os::exception_name(int exception_code, char *buf, size_t size) {
2198  for (int i = 0; exceptlabels[i].name != NULL; i++) {
2199    if (exceptlabels[i].number == exception_code) {
2200      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2201      return buf;
2202    }
2203  }
2204
2205  return NULL;
2206}
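// Illustrative use (not part of the build):
//   char buf[64];
//   os::exception_name(EXCEPTION_ACCESS_VIOLATION, buf, sizeof(buf));
//   // buf now holds "EXCEPTION_ACCESS_VIOLATION"; unknown codes return NULL.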
2207
2208//-----------------------------------------------------------------------------
2209LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2210  // handle exception caused by idiv; should only happen for -MinInt/-1
2211  // (division by zero is handled explicitly)
2212#ifdef _M_IA64
2213  assert(0, "Fix Handle_IDiv_Exception");
2214#else
2215  #ifdef  _M_AMD64
2216  PCONTEXT ctx = exceptionInfo->ContextRecord;
2217  address pc = (address)ctx->Rip;
2218  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2219  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2220  if (pc[0] == 0xF7) {
2221    // set correct result values and continue after idiv instruction
2222    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2223  } else {
2224    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2225  }
2226  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2227  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2228  // idiv opcode (0xF7).
2229  ctx->Rdx = (DWORD)0;             // remainder
2230  // Continue the execution
2231  #else
2232  PCONTEXT ctx = exceptionInfo->ContextRecord;
2233  address pc = (address)ctx->Eip;
2234  assert(pc[0] == 0xF7, "not an idiv opcode");
2235  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2236  assert(ctx->Eax == min_jint, "unexpected idiv exception");
2237  // set correct result values and continue after idiv instruction
2238  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2239  ctx->Eax = (DWORD)min_jint;      // result
2240  ctx->Edx = (DWORD)0;             // remainder
2241  // Continue the execution
2242  #endif
2243#endif
2244  return EXCEPTION_CONTINUE_EXECUTION;
2245}
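// Encoding note for the handler above: "idiv reg" is encoded as opcode 0xF7
// followed by a ModRM byte in the 0xF8..0xFF range (hence the ~0x7 masks in
// the asserts); on AMD64 an optional REX prefix (0x40..0x4F) may precede it,
// making the instruction 3 bytes long instead of 2.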
2246
2247//-----------------------------------------------------------------------------
2248LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2249  PCONTEXT ctx = exceptionInfo->ContextRecord;
2250#ifndef  _WIN64
2251  // handle exception caused by native method modifying control word
2252  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2253
2254  switch (exception_code) {
2255  case EXCEPTION_FLT_DENORMAL_OPERAND:
2256  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2257  case EXCEPTION_FLT_INEXACT_RESULT:
2258  case EXCEPTION_FLT_INVALID_OPERATION:
2259  case EXCEPTION_FLT_OVERFLOW:
2260  case EXCEPTION_FLT_STACK_CHECK:
2261  case EXCEPTION_FLT_UNDERFLOW:
2262    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2263    if (fp_control_word != ctx->FloatSave.ControlWord) {
2264      // Restore FPCW and mask out FLT exceptions
2265      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2266      // Mask out pending FLT exceptions
2267      ctx->FloatSave.StatusWord &=  0xffffff00;
2268      return EXCEPTION_CONTINUE_EXECUTION;
2269    }
2270  }
2271
2272  if (prev_uef_handler != NULL) {
2273    // We didn't handle this exception so pass it to the previous
2274    // UnhandledExceptionFilter.
2275    return (prev_uef_handler)(exceptionInfo);
2276  }
2277#else // !_WIN64
2278  // On Windows, the mxcsr control bits are non-volatile across calls
2279  // See also CR 6192333
2280  //
2281  jint MxCsr = INITIAL_MXCSR;
2282  // we can't use StubRoutines::addr_mxcsr_std()
2283  // because in Win64 mxcsr is not saved there
2284  if (MxCsr != ctx->MxCsr) {
2285    ctx->MxCsr = MxCsr;
2286    return EXCEPTION_CONTINUE_EXECUTION;
2287  }
2288#endif // !_WIN64
2289
2290  return EXCEPTION_CONTINUE_SEARCH;
2291}
2292
2293static inline void report_error(Thread* t, DWORD exception_code,
2294                                address addr, void* siginfo, void* context) {
2295  VMError::report_and_die(t, exception_code, addr, siginfo, context);
2296
2297  // If UseOsErrorReporting, this will return here and save the error file
2298  // somewhere where we can find it in the minidump.
2299}
2300
2301bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2302        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2303  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2304  address addr = (address) exceptionRecord->ExceptionInformation[1];
2305  if (Interpreter::contains(pc)) {
2306    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2307    if (!fr->is_first_java_frame()) {
2308      assert(fr->safe_for_sender(thread), "Safety check");
2309      *fr = fr->java_sender();
2310    }
2311  } else {
2312    // more complex handling for compiled code
2313    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2314    CodeBlob* cb = CodeCache::find_blob(pc);
2315    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2316      // Not sure where the pc points to; fall back to the default
2317      // stack overflow handling.
2318      return false;
2319    } else {
2320      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2321      // in compiled code, the stack banging is performed just after the return pc
2322      // has been pushed on the stack
2323      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2324      if (!fr->is_java_frame()) {
2325        assert(fr->safe_for_sender(thread), "Safety check");
2326        *fr = fr->java_sender();
2327      }
2328    }
2329  }
2330  assert(fr->is_java_frame(), "Safety check");
2331  return true;
2332}
2333
2334//-----------------------------------------------------------------------------
2335LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2336  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2337  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2338#ifdef _M_IA64
2339  // On Itanium, we need the "precise pc", which has the slot number coded
2340  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2341  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2342  // Convert the pc to "Unix format", which has the slot number coded
2343  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2344  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2345  // information is saved in the Unix format.
2346  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2347#else
2348  #ifdef _M_AMD64
2349  address pc = (address) exceptionInfo->ContextRecord->Rip;
2350  #else
2351  address pc = (address) exceptionInfo->ContextRecord->Eip;
2352  #endif
2353#endif
2354  Thread* t = Thread::current_or_null_safe();
2355
2356  // Handle SafeFetch32 and SafeFetchN exceptions.
2357  if (StubRoutines::is_safefetch_fault(pc)) {
2358    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2359  }
2360
2361#ifndef _WIN64
2362  // Execution protection violation - win32 running on AMD64 only
2363  // Handled first to avoid misdiagnosis as a "normal" access violation;
2364  // This is safe to do because we have a new/unique ExceptionInformation
2365  // code for this condition.
2366  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2367    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2368    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2369    address addr = (address) exceptionRecord->ExceptionInformation[1];
2370
2371    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2372      int page_size = os::vm_page_size();
2373
2374      // Make sure the pc and the faulting address are sane.
2375      //
2376      // If an instruction spans a page boundary, and the page containing
2377      // the beginning of the instruction is executable but the following
2378      // page is not, the pc and the faulting address might be slightly
2379      // different - we still want to unguard the 2nd page in this case.
2380      //
2381      // 15 bytes seems to be a (very) safe value for max instruction size.
2382      bool pc_is_near_addr =
2383        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2384      bool instr_spans_page_boundary =
2385        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2386                         (intptr_t) page_size) > 0);
2387
2388      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2389        static volatile address last_addr =
2390          (address) os::non_memory_address_word();
2391
2392        // In conservative mode, don't unguard unless the address is in the VM
2393        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2394            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2395
2396          // Set memory to RWX and retry
2397          address page_start =
2398            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2399          bool res = os::protect_memory((char*) page_start, page_size,
2400                                        os::MEM_PROT_RWX);
2401
2402          if (PrintMiscellaneous && Verbose) {
2403            char buf[256];
2404            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
2405                         "at " INTPTR_FORMAT
2406                         ", unguarding " INTPTR_FORMAT ": %s", addr,
2407                         page_start, (res ? "success" : strerror(errno)));
2408            tty->print_raw_cr(buf);
2409          }
2410
2411          // Set last_addr so if we fault again at the same address, we don't
2412          // end up in an endless loop.
2413          //
2414          // There are two potential complications here.  Two threads trapping
2415          // at the same address at the same time could cause one of the
2416          // threads to think the page was already unguarded, and abort the VM.  Likely
2417          // very rare.
2418          //
2419          // The other race involves two threads alternately trapping at
2420          // different addresses and failing to unguard the page, resulting in
2421          // an endless loop.  This condition is probably even more unlikely
2422          // than the first.
2423          //
2424          // Although both cases could be avoided by using locks or thread
2425          // local last_addr, these solutions are an unnecessary complication:
2426          // this handler is a best-effort safety net, not a complete solution.
2427          // It is disabled by default and should only be used as a workaround
2428          // in case we missed any no-execute-unsafe VM code.
2429
2430          last_addr = addr;
2431
2432          return EXCEPTION_CONTINUE_EXECUTION;
2433        }
2434      }
2435
2436      // Last unguard failed or not unguarding
2437      tty->print_raw_cr("Execution protection violation");
2438      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2439                   exceptionInfo->ContextRecord);
2440      return EXCEPTION_CONTINUE_SEARCH;
2441    }
2442  }
2443#endif // _WIN64
2444
2445  // Check to see if we caught the safepoint code in the
2446  // process of write protecting the memory serialization page.
2447  // It write enables the page immediately after protecting it
2448  // so just return.
2449  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2450    JavaThread* thread = (JavaThread*) t;
2451    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2452    address addr = (address) exceptionRecord->ExceptionInformation[1];
2453    if (os::is_memory_serialize_page(thread, addr)) {
2454      // Block current thread until the memory serialize page permission restored.
2455      os::block_on_serialize_page_trap();
2456      return EXCEPTION_CONTINUE_EXECUTION;
2457    }
2458  }
2459
2460  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2461      VM_Version::is_cpuinfo_segv_addr(pc)) {
2462    // Verify that the OS saves/restores AVX registers.
2463    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2464  }
2465
2466  if (t != NULL && t->is_Java_thread()) {
2467    JavaThread* thread = (JavaThread*) t;
2468    bool in_java = thread->thread_state() == _thread_in_Java;
2469
2470    // Handle potential stack overflows up front.
2471    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2472#ifdef _M_IA64
2473      // Use guard page for register stack.
2474      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2475      address addr = (address) exceptionRecord->ExceptionInformation[1];
2476      // Check for a register stack overflow on Itanium
2477      if (thread->addr_inside_register_stack_red_zone(addr)) {
2478        // Fatal red zone violation happens if the Java program
2479        // catches a StackOverflow error and does so much processing
2480        // that it runs beyond the unprotected yellow guard zone. As
2481        // a result, we are out of here.
2482        fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2483      } else if(thread->addr_inside_register_stack(addr)) {
2484        // Disable the yellow zone which sets the state that
2485        // we've got a stack overflow problem.
2486        if (thread->stack_yellow_reserved_zone_enabled()) {
2487          thread->disable_stack_yellow_reserved_zone();
2488        }
2489        // Give us some room to process the exception.
2490        thread->disable_register_stack_guard();
2491        // Tracing with +Verbose.
2492        if (Verbose) {
2493          tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2494          tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2495          tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2496          tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2497                        thread->register_stack_base(),
2498                        thread->register_stack_base() + thread->stack_size());
2499        }
2500
2501        // Reguard the permanent register stack red zone just to be sure.
2502        // We saw Windows silently disabling this without telling us.
2503        thread->enable_register_stack_red_zone();
2504
2505        return Handle_Exception(exceptionInfo,
2506                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2507      }
2508#endif
2509      if (thread->stack_guards_enabled()) {
2510        if (in_java) {
2511          frame fr;
2512          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2513          address addr = (address) exceptionRecord->ExceptionInformation[1];
2514          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2515            assert(fr.is_java_frame(), "Must be a Java frame");
2516            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2517          }
2518        }
2519        // Yellow zone violation.  The o/s has unprotected the first yellow
2520        // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2521        // update the enabled status, even if the zone contains only one page.
2522        thread->disable_stack_yellow_reserved_zone();
2523        // If not in java code, return and hope for the best.
2524        return in_java
2525            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2526            :  EXCEPTION_CONTINUE_EXECUTION;
2527      } else {
2528        // Fatal red zone violation.
2529        thread->disable_stack_red_zone();
2530        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2531        report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2532                      exceptionInfo->ContextRecord);
2533        return EXCEPTION_CONTINUE_SEARCH;
2534      }
2535    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2536      // Either stack overflow or null pointer exception.
2537      if (in_java) {
2538        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2539        address addr = (address) exceptionRecord->ExceptionInformation[1];
2540        address stack_end = thread->stack_end();
2541        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2542          // Stack overflow.
2543          assert(!os::uses_stack_guard_pages(),
2544                 "should be caught by red zone code above.");
2545          return Handle_Exception(exceptionInfo,
2546                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2547        }
2548        // Check for safepoint polling and implicit null checks.
2549        // We only expect null pointers in the stubs (vtable);
2550        // the rest are checked explicitly now.
2551        CodeBlob* cb = CodeCache::find_blob(pc);
2552        if (cb != NULL) {
2553          if (os::is_poll_address(addr)) {
2554            address stub = SharedRuntime::get_poll_stub(pc);
2555            return Handle_Exception(exceptionInfo, stub);
2556          }
2557        }
2558        {
2559#ifdef _WIN64
2560          // If it's a legal stack address, map the entire region in
2561          //
2562          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2563          address addr = (address) exceptionRecord->ExceptionInformation[1];
2564          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2565            addr = (address)((uintptr_t)addr &
2566                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2567            os::commit_memory((char *)addr, thread->stack_base() - addr,
2568                              !ExecMem);
2569            return EXCEPTION_CONTINUE_EXECUTION;
2570          } else
2571#endif
2572          {
2573            // Null pointer exception.
2574#ifdef _M_IA64
2575            // Process implicit null checks in compiled code. Note: Implicit null checks
2576            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2577            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2578              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2579              // Handle implicit null check in UEP method entry
2580              if (cb && (cb->is_frame_complete_at(pc) ||
2581                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2582                if (Verbose) {
2583                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2584                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2585                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2586                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2587                                *(bundle_start + 1), *bundle_start);
2588                }
2589                return Handle_Exception(exceptionInfo,
2590                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2591              }
2592            }
2593
2594            // Implicit null checks were processed above.  Hence, we should not reach
2595            // here in the usual case => die!
2596            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2597            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2598                         exceptionInfo->ContextRecord);
2599            return EXCEPTION_CONTINUE_SEARCH;
2600
2601#else // !IA64
2602
2603            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2604              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2605              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2606            }
2607            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2608                         exceptionInfo->ContextRecord);
2609            return EXCEPTION_CONTINUE_SEARCH;
2610#endif
2611          }
2612        }
2613      }
2614
2615#ifdef _WIN64
2616      // Special care for fast JNI field accessors.
2617      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2618      // in and the heap gets shrunk before the field access.
2619      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2620        address addr = JNI_FastGetField::find_slowcase_pc(pc);
2621        if (addr != (address)-1) {
2622          return Handle_Exception(exceptionInfo, addr);
2623        }
2624      }
2625#endif
2626
2627      // Stack overflow or null pointer exception in native code.
2628      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2629                   exceptionInfo->ContextRecord);
2630      return EXCEPTION_CONTINUE_SEARCH;
2631    } // /EXCEPTION_ACCESS_VIOLATION
2632    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2633#if defined _M_IA64
2634    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2635              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2636      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2637
2638      // Compiled method patched to be non entrant? Following conditions must apply:
2639      // 1. must be first instruction in bundle
2640      // 2. must be a break instruction with appropriate code
2641      if ((((uint64_t) pc & 0x0F) == 0) &&
2642          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2643        return Handle_Exception(exceptionInfo,
2644                                (address)SharedRuntime::get_handle_wrong_method_stub());
2645      }
2646    } // /EXCEPTION_ILLEGAL_INSTRUCTION
2647#endif
2648
2649
2650    if (in_java) {
2651      switch (exception_code) {
2652      case EXCEPTION_INT_DIVIDE_BY_ZERO:
2653        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2654
2655      case EXCEPTION_INT_OVERFLOW:
2656        return Handle_IDiv_Exception(exceptionInfo);
2657
2658      } // switch
2659    }
2660    if (((thread->thread_state() == _thread_in_Java) ||
2661         (thread->thread_state() == _thread_in_native)) &&
2662         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2663      LONG result = Handle_FLT_Exception(exceptionInfo);
2664      if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2665    }
2666  }
2667
2668  if (exception_code != EXCEPTION_BREAKPOINT) {
2669    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2670                 exceptionInfo->ContextRecord);
2671  }
2672  return EXCEPTION_CONTINUE_SEARCH;
2673}
2674
2675#ifndef _WIN64
2676// Special care for fast JNI accessors.
2677// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2678// the heap gets shrunk before the field access.
2679// Need to install our own structured exception handler since native code may
2680// install its own.
2681LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2682  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2683  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2684    address pc = (address) exceptionInfo->ContextRecord->Eip;
2685    address addr = JNI_FastGetField::find_slowcase_pc(pc);
2686    if (addr != (address)-1) {
2687      return Handle_Exception(exceptionInfo, addr);
2688    }
2689  }
2690  return EXCEPTION_CONTINUE_SEARCH;
2691}
2692
2693#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2694  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2695                                                     jobject obj,           \
2696                                                     jfieldID fieldID) {    \
2697    __try {                                                                 \
2698      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2699                                                                 obj,       \
2700                                                                 fieldID);  \
2701    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2702                                              _exception_info())) {         \
2703    }                                                                       \
2704    return 0;                                                               \
2705  }
2706
2707DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2708DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2709DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2710DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2711DEFINE_FAST_GETFIELD(jint,     int,    Int)
2712DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2713DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2714DEFINE_FAST_GETFIELD(jdouble,  double, Double)
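// For reference, DEFINE_FAST_GETFIELD(jint, int, Int) above expands to a
// jni_fast_GetIntField_wrapper() that invokes the generated fast accessor
// inside a __try/__except block, routing any access violation through
// fastJNIAccessorExceptionFilter(), which either resumes at the slow-case
// stub or lets the exception continue up the handler chain.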
2715
2716address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2717  switch (type) {
2718  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2719  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2720  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2721  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2722  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2723  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2724  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2725  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2726  default:        ShouldNotReachHere();
2727  }
2728  return (address)-1;
2729}
2730#endif
2731
2732// Virtual Memory
2733
2734int os::vm_page_size() { return os::win32::vm_page_size(); }
2735int os::vm_allocation_granularity() {
2736  return os::win32::vm_allocation_granularity();
2737}
2738
2739// Windows large page support is available on Windows 2003. In order to use
2740// large page memory, the administrator must first assign additional privilege
2741// to the user:
2742//   + select Control Panel -> Administrative Tools -> Local Security Policy
2743//   + select Local Policies -> User Rights Assignment
2744//   + double click "Lock pages in memory", add users and/or groups
2745//   + reboot
2746// Note the above steps are needed for administrators as well, as administrators
2747// by default do not have the privilege to lock pages in memory.
2748//
2749// Note about Windows 2003: although the API supports committing large page
2750// memory on a page-by-page basis and VirtualAlloc() returns success under this
2751// scenario, I found through experiment that it only uses large pages if the entire
2752// memory region is reserved and committed in a single VirtualAlloc() call.
2753// This makes Windows large page support more or less like Solaris ISM, in
2754// that the entire heap must be committed upfront. This will probably change
2755// in the future; if so, the code below needs to be revisited.
2756
2757#ifndef MEM_LARGE_PAGES
2758  #define MEM_LARGE_PAGES 0x20000000
2759#endif
2760
2761static HANDLE    _hProcess;
2762static HANDLE    _hToken;
2763
2764// Container for NUMA node list info
2765class NUMANodeListHolder {
2766 private:
2767  int *_numa_used_node_list;  // allocated below
2768  int _numa_used_node_count;
2769
2770  void free_node_list() {
2771    if (_numa_used_node_list != NULL) {
2772      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2773    }
2774  }
2775
2776 public:
2777  NUMANodeListHolder() {
2778    _numa_used_node_count = 0;
2779    _numa_used_node_list = NULL;
2780    // do rest of initialization in build routine (after function pointers are set up)
2781  }
2782
2783  ~NUMANodeListHolder() {
2784    free_node_list();
2785  }
2786
2787  bool build() {
2788    DWORD_PTR proc_aff_mask;
2789    DWORD_PTR sys_aff_mask;
2790    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2791    ULONG highest_node_number;
2792    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2793    free_node_list();
2794    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2795    for (unsigned int i = 0; i <= highest_node_number; i++) {
2796      ULONGLONG proc_mask_numa_node;
2797      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2798      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2799        _numa_used_node_list[_numa_used_node_count++] = i;
2800      }
2801    }
2802    return (_numa_used_node_count > 1);
2803  }
2804
2805  int get_count() { return _numa_used_node_count; }
2806  int get_node_list_entry(int n) {
2807    // for indexes out of range, returns -1
2808    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2809  }
2810
2811} numa_node_list_holder;
2812
2813
2814
2815static size_t _large_page_size = 0;
2816
2817static bool request_lock_memory_privilege() {
2818  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2819                          os::current_process_id());
2820
2821  LUID luid;
2822  if (_hProcess != NULL &&
2823      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2824      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2825
2826    TOKEN_PRIVILEGES tp;
2827    tp.PrivilegeCount = 1;
2828    tp.Privileges[0].Luid = luid;
2829    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2830
2831    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2832    // privilege. Check GetLastError() too. See MSDN document.
2833    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2834        (GetLastError() == ERROR_SUCCESS)) {
2835      return true;
2836    }
2837  }
2838
2839  return false;
2840}
2841
2842static void cleanup_after_large_page_init() {
2843  if (_hProcess) CloseHandle(_hProcess);
2844  _hProcess = NULL;
2845  if (_hToken) CloseHandle(_hToken);
2846  _hToken = NULL;
2847}
2848
2849static bool numa_interleaving_init() {
2850  bool success = false;
2851  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2852
2853  // print a warning if UseNUMAInterleaving flag is specified on command line
2854  bool warn_on_failure = use_numa_interleaving_specified;
2855#define WARN(msg) if (warn_on_failure) { warning(msg); }
2856
2857  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2858  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2859  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2860
2861  if (numa_node_list_holder.build()) {
2862    if (PrintMiscellaneous && Verbose) {
2863      tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2864      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2865        tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
2866      }
2867      tty->print("\n");
2868    }
2869    success = true;
2870  } else {
2871    WARN("Process does not cover multiple NUMA nodes.");
2872  }
2873  if (!success) {
2874    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2875  }
2876  return success;
2877#undef WARN
2878}
2879
2880// This routine is used whenever we need to reserve a contiguous VA range,
2881// but must make separate VirtualAlloc calls for each piece of the range.
2882// Reasons for doing this:
2883//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2884//  * UseNUMAInterleaving requires a separate node for each piece
2885static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2886                                         DWORD prot,
2887                                         bool should_inject_error = false) {
2888  char * p_buf;
2889  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2890  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2891  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2892
2893  // First reserve enough address space in advance, since we want to be
2894  // able to break a single contiguous virtual address range into multiple
2895  // large page commits, but WS2003 does not allow reserving large page space;
2896  // so we just use 4K pages for the reserve.  This gives us a legal contiguous
2897  // address space.  Then we deallocate that reservation and re-allocate
2898  // using large pages.
2899  const size_t size_of_reserve = bytes + chunk_size;
2900  if (bytes > size_of_reserve) {
2901    // Overflowed.
2902    return NULL;
2903  }
2904  p_buf = (char *) VirtualAlloc(addr,
2905                                size_of_reserve,  // size of Reserve
2906                                MEM_RESERVE,
2907                                PAGE_READWRITE);
2908  // If reservation failed, return NULL
2909  if (p_buf == NULL) return NULL;
2910  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2911  os::release_memory(p_buf, bytes + chunk_size);
2912
2913  // We still need to round up to a page boundary (in case we are using large pages),
2914  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size);
2915  // instead we handle this in the bytes_to_rq computation below.
2916  p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2917
2918  // now go through and allocate one chunk at a time until all bytes are
2919  // allocated
2920  size_t  bytes_remaining = bytes;
2921  // An overflow of align_size_up() would have been caught above
2922  // in the calculation of size_of_reserve.
2923  char * next_alloc_addr = p_buf;
2924  HANDLE hProc = GetCurrentProcess();
2925
2926#ifdef ASSERT
2927  // Variable for the failure injection
2928  long ran_num = os::random();
2929  size_t fail_after = ran_num % bytes;
2930#endif
2931
2932  int count=0;
2933  while (bytes_remaining) {
2934    // select bytes_to_rq to get to the next chunk_size boundary
2935
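    // Worked example (illustrative): with chunk_size = 64K and next_alloc_addr
    // sitting 16K past a chunk boundary, the first request is limited to 48K
    // so that all subsequent requests start chunk-aligned.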
2936    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2937    // Note allocate and commit
2938    char * p_new;
2939
2940#ifdef ASSERT
2941    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2942#else
2943    const bool inject_error_now = false;
2944#endif
2945
2946    if (inject_error_now) {
2947      p_new = NULL;
2948    } else {
2949      if (!UseNUMAInterleaving) {
2950        p_new = (char *) VirtualAlloc(next_alloc_addr,
2951                                      bytes_to_rq,
2952                                      flags,
2953                                      prot);
2954      } else {
2955        // get the next node to use from the used_node_list
2956        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2957        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2958        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2959      }
2960    }
2961
2962    if (p_new == NULL) {
2963      // Free any allocated pages
2964      if (next_alloc_addr > p_buf) {
2965        // Some memory was committed so release it.
2966        size_t bytes_to_release = bytes - bytes_remaining;
2967        // NMT has yet to record any individual blocks, so it
2968        // needs to create a dummy 'reserve' record to match
2969        // the release.
2970        MemTracker::record_virtual_memory_reserve((address)p_buf,
2971                                                  bytes_to_release, CALLER_PC);
2972        os::release_memory(p_buf, bytes_to_release);
2973      }
2974#ifdef ASSERT
2975      if (should_inject_error) {
2976        if (TracePageSizes && Verbose) {
2977          tty->print_cr("Reserving pages individually failed.");
2978        }
2979      }
2980#endif
2981      return NULL;
2982    }
2983
2984    bytes_remaining -= bytes_to_rq;
2985    next_alloc_addr += bytes_to_rq;
2986    count++;
2987  }
2988  // Although the memory is allocated individually, it is returned as one.
2989  // NMT records it as one block.
2990  if ((flags & MEM_COMMIT) != 0) {
2991    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
2992  } else {
2993    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
2994  }
2995
2996  // made it this far, success
2997  return p_buf;
2998}
2999
3000
3001
3002void os::large_page_init() {
3003  if (!UseLargePages) return;
3004
3005  // print a warning if any large page related flag is specified on command line
3006  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3007                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3008  bool success = false;
3009
3010#define WARN(msg) if (warn_on_failure) { warning(msg); }
3011  if (request_lock_memory_privilege()) {
3012    size_t s = GetLargePageMinimum();
3013    if (s) {
3014#if defined(IA32) || defined(AMD64)
3015      if (s > 4*M || LargePageSizeInBytes > 4*M) {
3016        WARN("JVM cannot use large pages bigger than 4mb.");
3017      } else {
3018#endif
3019        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3020          _large_page_size = LargePageSizeInBytes;
3021        } else {
3022          _large_page_size = s;
3023        }
3024        success = true;
3025#if defined(IA32) || defined(AMD64)
3026      }
3027#endif
3028    } else {
3029      WARN("Large page is not supported by the processor.");
3030    }
3031  } else {
3032    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3033  }
3034#undef WARN
3035
3036  const size_t default_page_size = (size_t) vm_page_size();
3037  if (success && _large_page_size > default_page_size) {
3038    _page_sizes[0] = _large_page_size;
3039    _page_sizes[1] = default_page_size;
3040    _page_sizes[2] = 0;
3041  }
3042
3043  cleanup_after_large_page_init();
3044  UseLargePages = success;
3045}
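// Usage sketch (illustrative): large pages are requested on the launcher command
// line, e.g.
//   java -XX:+UseLargePages -XX:LargePageSizeInBytes=2m ...
// and, per the warnings above, require the launching user to hold the
// "Lock pages in memory" privilege; without it, UseLargePages is turned back off above.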
3046
3047// On win32, one cannot release just a part of reserved memory, it's an
3048// all or nothing deal.  When we split a reservation, we must break the
3049// reservation into two reservations.
3050void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3051                                  bool realloc) {
3052  if (size > 0) {
3053    release_memory(base, size);
3054    if (realloc) {
3055      reserve_memory(split, base);
3056    }
3057    if (size != split) {
3058      reserve_memory(size - split, base + split);
3059    }
3060  }
3061}
3062
3063// Multiple threads can race in this code, but unlike on POSIX-like OSes it is not
3064// possible to unmap small sections of virtual space to get the requested alignment.
3065// Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3066char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3067  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3068         "Alignment must be a multiple of allocation granularity (page size)");
3069  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3070
3071  size_t extra_size = size + alignment;
3072  assert(extra_size >= size, "overflow, size is too large to allow alignment");
3073
3074  char* aligned_base = NULL;
3075
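  // Worked example (illustrative): with a 64K allocation granularity, a request for
  // size = 1M aligned to 1M first reserves 2M anywhere, rounds the returned base up
  // to the next 1M boundary, releases the whole 2M, and then re-reserves exactly 1M
  // at that boundary.  If another thread claims the range in between, the final
  // reserve returns NULL and the loop simply retries.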
3076  do {
3077    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3078    if (extra_base == NULL) {
3079      return NULL;
3080    }
3081    // Do manual alignment
3082    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3083
3084    os::release_memory(extra_base, extra_size);
3085
3086    aligned_base = os::reserve_memory(size, aligned_base);
3087
3088  } while (aligned_base == NULL);
3089
3090  return aligned_base;
3091}
3092
3093char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3094  assert((size_t)addr % os::vm_allocation_granularity() == 0,
3095         "reserve alignment");
3096  assert(bytes % os::vm_page_size() == 0, "reserve page size");
3097  char* res;
3098  // note that if UseLargePages is on, all the areas that require interleaving
3099  // will go through reserve_memory_special rather than through here.
3100  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3101  if (!use_individual) {
3102    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3103  } else {
3104    elapsedTimer reserveTimer;
3105    if (Verbose && PrintMiscellaneous) reserveTimer.start();
3106    // in numa interleaving, we have to allocate pages individually
3107    // (well really chunks of NUMAInterleaveGranularity size)
3108    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3109    if (res == NULL) {
3110      warning("NUMA page allocation failed");
3111    }
3112    if (Verbose && PrintMiscellaneous) {
3113      reserveTimer.stop();
3114      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3115                    reserveTimer.milliseconds(), reserveTimer.ticks());
3116    }
3117  }
3118  assert(res == NULL || addr == NULL || addr == res,
3119         "Unexpected address from reserve.");
3120
3121  return res;
3122}
3123
3124// Reserve memory at an arbitrary address, only if that area is
3125// available (and not reserved for something else).
3126char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3127  // Windows os::reserve_memory() fails if the requested address range is
3128  // not available.
3129  return reserve_memory(bytes, requested_addr);
3130}
3131
3132size_t os::large_page_size() {
3133  return _large_page_size;
3134}
3135
3136bool os::can_commit_large_page_memory() {
3137  // Windows only uses large page memory when the entire region is reserved
3138  // and committed in a single VirtualAlloc() call. This may change in the
3139  // future, but with Windows 2003 it's not possible to commit on demand.
3140  return false;
3141}
3142
3143bool os::can_execute_large_page_memory() {
3144  return true;
3145}
3146
3147char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3148                                 bool exec) {
3149  assert(UseLargePages, "only for large pages");
3150
3151  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3152    return NULL; // Fallback to small pages.
3153  }
3154
3155  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3156  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3157
3158  // with large pages, there are two cases where we need to use Individual Allocation
3159  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3160  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3161  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3162    if (TracePageSizes && Verbose) {
3163      tty->print_cr("Reserving large pages individually.");
3164    }
3165    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3166    if (p_buf == NULL) {
3167      // give an appropriate warning message
3168      if (UseNUMAInterleaving) {
3169        warning("NUMA large page allocation failed, UseLargePages flag ignored");
3170      }
3171      if (UseLargePagesIndividualAllocation) {
3172        warning("Individually allocated large pages failed, "
3173                "use -XX:-UseLargePagesIndividualAllocation to turn off");
3174      }
3175      return NULL;
3176    }
3177
3178    return p_buf;
3179
3180  } else {
3181    if (TracePageSizes && Verbose) {
3182      tty->print_cr("Reserving large pages in a single large chunk.");
3183    }
3184    // normal policy just allocate it all at once
3185    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3186    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3187    if (res != NULL) {
3188      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3189    }
3190
3191    return res;
3192  }
3193}
3194
3195bool os::release_memory_special(char* base, size_t bytes) {
3196  assert(base != NULL, "Sanity check");
3197  return release_memory(base, bytes);
3198}
3199
3200void os::print_statistics() {
3201}
3202
3203static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3204  int err = os::get_last_error();
3205  char buf[256];
3206  size_t buf_len = os::lasterror(buf, sizeof(buf));
3207  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3208          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3209          exec, buf_len != 0 ? buf : "<no_error_string>", err);
3210}
3211
3212bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3213  if (bytes == 0) {
3214    // Don't bother the OS with noops.
3215    return true;
3216  }
3217  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3218  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3219  // Don't attempt to print anything if the OS call fails. We're
3220  // probably low on resources, so the print itself may cause crashes.
3221
3222  // unless we have NUMAInterleaving enabled, the range of a commit
3223  // is always within a reserve covered by a single VirtualAlloc
3224  // in that case we can just do a single commit for the requested size
3225  if (!UseNUMAInterleaving) {
3226    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3227      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3228      return false;
3229    }
3230    if (exec) {
3231      DWORD oldprot;
3232      // Windows doc says to use VirtualProtect to get execute permissions
3233      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3234        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3235        return false;
3236      }
3237    }
3238    return true;
3239  } else {
3240
3241    // when NUMAInterleaving is enabled, the commit might cover a range that
3242    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3243    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3244    // returns represents the number of bytes that can be committed in one step.
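    // For instance (illustrative), a commit that spans three chunks reserved by
    // allocate_pages_individually() is issued as three MEM_COMMIT calls, each sized
    // by the RegionSize VirtualQuery reports for its chunk.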
3245    size_t bytes_remaining = bytes;
3246    char * next_alloc_addr = addr;
3247    while (bytes_remaining > 0) {
3248      MEMORY_BASIC_INFORMATION alloc_info;
3249      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3250      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3251      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3252                       PAGE_READWRITE) == NULL) {
3253        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3254                                            exec);)
3255        return false;
3256      }
3257      if (exec) {
3258        DWORD oldprot;
3259        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3260                            PAGE_EXECUTE_READWRITE, &oldprot)) {
3261          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3262                                              exec);)
3263          return false;
3264        }
3265      }
3266      bytes_remaining -= bytes_to_rq;
3267      next_alloc_addr += bytes_to_rq;
3268    }
3269  }
3270  // if we made it this far, return true
3271  return true;
3272}
3273
3274bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3275                          bool exec) {
3276  // alignment_hint is ignored on this OS
3277  return pd_commit_memory(addr, size, exec);
3278}
3279
3280void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3281                                  const char* mesg) {
3282  assert(mesg != NULL, "mesg must be specified");
3283  if (!pd_commit_memory(addr, size, exec)) {
3284    warn_fail_commit_memory(addr, size, exec);
3285    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3286  }
3287}
3288
3289void os::pd_commit_memory_or_exit(char* addr, size_t size,
3290                                  size_t alignment_hint, bool exec,
3291                                  const char* mesg) {
3292  // alignment_hint is ignored on this OS
3293  pd_commit_memory_or_exit(addr, size, exec, mesg);
3294}
3295
3296bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3297  if (bytes == 0) {
3298    // Don't bother the OS with noops.
3299    return true;
3300  }
3301  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3302  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3303  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3304}
3305
3306bool os::pd_release_memory(char* addr, size_t bytes) {
3307  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3308}
3309
3310bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3311  return os::commit_memory(addr, size, !ExecMem);
3312}
3313
3314bool os::remove_stack_guard_pages(char* addr, size_t size) {
3315  return os::uncommit_memory(addr, size);
3316}
3317
3318static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3319  uint count = 0;
3320  bool ret = false;
3321  size_t bytes_remaining = bytes;
3322  char * next_protect_addr = addr;
3323
3324  // Use VirtualQuery() to get the chunk size.
3325  while (bytes_remaining) {
3326    MEMORY_BASIC_INFORMATION alloc_info;
3327    if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3328      return false;
3329    }
3330
3331    size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3332    // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3333    // but we don't distinguish here as both cases are protected by same API.
3334    ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3335    if (!ret) {
3336      warning("Failed protecting pages individually for chunk #%u", count);
3337      return false;
3338    }
3339
3340    bytes_remaining -= bytes_to_protect;
3341    next_protect_addr += bytes_to_protect;
3342    count++;
3343  }
3344  return ret;
3345}
3346
3347// Set protections specified
3348bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3349                        bool is_committed) {
3350  unsigned int p = 0;
3351  switch (prot) {
3352  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3353  case MEM_PROT_READ: p = PAGE_READONLY; break;
3354  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3355  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3356  default:
3357    ShouldNotReachHere();
3358  }
3359
3360  DWORD old_status;
3361
3362  // Strangely enough, on Win32 one can change protection only for committed
3363  // memory; not a big deal anyway, as 'bytes' is less than or equal to 64K.
3364  if (!is_committed) {
3365    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3366                          "cannot commit protection page");
3367  }
3368  // One cannot use os::guard_memory() here, as on Win32 guard pages
3369  // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3370  //
3371  // Pages in the region become guard pages. Any attempt to access a guard page
3372  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3373  // the guard page status. Guard pages thus act as a one-time access alarm.
3374  bool ret;
3375  if (UseNUMAInterleaving) {
3376    // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3377    // so we must protect the chunks individually.
3378    ret = protect_pages_individually(addr, bytes, p, &old_status);
3379  } else {
3380    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3381  }
3382#ifdef ASSERT
3383  if (!ret) {
3384    int err = os::get_last_error();
3385    char buf[256];
3386    size_t buf_len = os::lasterror(buf, sizeof(buf));
3387    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3388          ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3389          buf_len != 0 ? buf : "<no_error_string>", err);
3390  }
3391#endif
3392  return ret;
3393}
3394
3395bool os::guard_memory(char* addr, size_t bytes) {
3396  DWORD old_status;
3397  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3398}
3399
3400bool os::unguard_memory(char* addr, size_t bytes) {
3401  DWORD old_status;
3402  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3403}
3404
3405void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3406void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3407void os::numa_make_global(char *addr, size_t bytes)    { }
3408void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3409bool os::numa_topology_changed()                       { return false; }
3410size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3411int os::numa_get_group_id()                            { return 0; }
3412size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3413  if (numa_node_list_holder.get_count() == 0 && size > 0) {
3414    // Provide an answer for UMA systems
3415    ids[0] = 0;
3416    return 1;
3417  } else {
3418    // check for size bigger than actual groups_num
3419    size = MIN2(size, numa_get_groups_num());
3420    for (int i = 0; i < (int)size; i++) {
3421      ids[i] = numa_node_list_holder.get_node_list_entry(i);
3422    }
3423    return size;
3424  }
3425}
3426
3427bool os::get_page_info(char *start, page_info* info) {
3428  return false;
3429}
3430
3431char *os::scan_pages(char *start, char* end, page_info* page_expected,
3432                     page_info* page_found) {
3433  return end;
3434}
3435
3436char* os::non_memory_address_word() {
3437  // Must never look like an address returned by reserve_memory,
3438  // even in its subfields (as defined by the CPU immediate fields,
3439  // if the CPU splits constants across multiple instructions).
3440  return (char*)-1;
3441}
3442
3443#define MAX_ERROR_COUNT 100
3444#define SYS_THREAD_ERROR 0xffffffffUL
3445
3446void os::pd_start_thread(Thread* thread) {
3447  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3448  // Returns previous suspend state:
3449  // 0:  Thread was not suspended
3450  // 1:  Thread is running now
3451  // >1: Thread is still suspended.
3452  assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3453}
3454
3455class HighResolutionInterval : public CHeapObj<mtThread> {
3456  // The default timer resolution seems to be 10 milliseconds.
3457  // (Where is this written down?)
3458  // If someone wants to sleep for only a fraction of the default,
3459  // then we set the timer resolution down to 1 millisecond for
3460  // the duration of their interval.
3461  // We carefully set the resolution back, since otherwise we
3462  // seem to incur an overhead (3%?) that we don't need.
3463  // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
3464  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3465  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3466  // timeBeginPeriod() if the relative error exceeded some threshold.
3467  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3468  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3469  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3470  // resolution timers running.
3471 private:
3472  jlong resolution;
3473 public:
3474  HighResolutionInterval(jlong ms) {
3475    resolution = ms % 10L;
3476    if (resolution != 0) {
3477      MMRESULT result = timeBeginPeriod(1L);
3478    }
3479  }
3480  ~HighResolutionInterval() {
3481    if (resolution != 0) {
3482      MMRESULT result = timeEndPeriod(1L);
3483    }
3484    resolution = 0L;
3485  }
3486};
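// Usage sketch (see os::sleep below): a timed wait is bracketed with a heap-allocated
// interval object so the 1 ms resolution is only in effect for the duration of the
// wait, e.g.
//   HighResolutionInterval* phri = new HighResolutionInterval(ms);
//   ... WaitForMultipleObjects(...) ...
//   delete phri;   // restores the default resolution if it was raised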
3487
3488int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3489  jlong limit = (jlong) MAXDWORD;
3490
3491  while (ms > limit) {
3492    int res;
3493    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3494      return res;
3495    }
3496    ms -= limit;
3497  }
3498
3499  assert(thread == Thread::current(), "thread consistency check");
3500  OSThread* osthread = thread->osthread();
3501  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3502  int result;
3503  if (interruptable) {
3504    assert(thread->is_Java_thread(), "must be java thread");
3505    JavaThread *jt = (JavaThread *) thread;
3506    ThreadBlockInVM tbivm(jt);
3507
3508    jt->set_suspend_equivalent();
3509    // cleared by handle_special_suspend_equivalent_condition() or
3510    // java_suspend_self() via check_and_wait_while_suspended()
3511
3512    HANDLE events[1];
3513    events[0] = osthread->interrupt_event();
3514    HighResolutionInterval *phri=NULL;
3515    if (!ForceTimeHighResolution) {
3516      phri = new HighResolutionInterval(ms);
3517    }
3518    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3519      result = OS_TIMEOUT;
3520    } else {
3521      ResetEvent(osthread->interrupt_event());
3522      osthread->set_interrupted(false);
3523      result = OS_INTRPT;
3524    }
3525    delete phri; //if it is NULL, harmless
3526
3527    // were we externally suspended while we were waiting?
3528    jt->check_and_wait_while_suspended();
3529  } else {
3530    assert(!thread->is_Java_thread(), "must not be java thread");
3531    Sleep((long) ms);
3532    result = OS_TIMEOUT;
3533  }
3534  return result;
3535}
3536
3537// Short sleep, direct OS call.
3538//
3539// ms = 0, means allow others (if any) to run.
3540//
3541void os::naked_short_sleep(jlong ms) {
3542  assert(ms < 1000, "Un-interruptable sleep, short time use only");
3543  Sleep(ms);
3544}
3545
3546// Sleep forever; naked call to OS-specific sleep; use with CAUTION
3547void os::infinite_sleep() {
3548  while (true) {    // sleep forever ...
3549    Sleep(100000);  // ... 100 seconds at a time
3550  }
3551}
3552
3553typedef BOOL (WINAPI * STTSignature)(void);
3554
3555void os::naked_yield() {
3556  // Consider passing back the return value from SwitchToThread().
3557  SwitchToThread();
3558}
3559
3560// Win32 only gives you access to seven real priorities at a time,
3561// so we compress Java's ten down to seven.  It would be better
3562// if we dynamically adjusted relative priorities.
3563
3564int os::java_to_os_priority[CriticalPriority + 1] = {
3565  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3566  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3567  THREAD_PRIORITY_LOWEST,                       // 2
3568  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3569  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3570  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3571  THREAD_PRIORITY_NORMAL,                       // 6
3572  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3573  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3574  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3575  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3576  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3577};
3578
3579int prio_policy1[CriticalPriority + 1] = {
3580  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3581  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3582  THREAD_PRIORITY_LOWEST,                       // 2
3583  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3584  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3585  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3586  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3587  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3588  THREAD_PRIORITY_HIGHEST,                      // 8
3589  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3590  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3591  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3592};
3593
3594static int prio_init() {
3595  // If ThreadPriorityPolicy is 1, switch tables
3596  if (ThreadPriorityPolicy == 1) {
3597    int i;
3598    for (i = 0; i < CriticalPriority + 1; i++) {
3599      os::java_to_os_priority[i] = prio_policy1[i];
3600    }
3601  }
3602  if (UseCriticalJavaThreadPriority) {
3603    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3604  }
3605  return 0;
3606}
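// Illustrative flag combinations: -XX:ThreadPriorityPolicy=1 switches to the more
// aggressive prio_policy1 table above, and -XX:+UseCriticalJavaThreadPriority
// additionally maps MaxPriority threads to the CriticalPriority entry.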
3607
3608OSReturn os::set_native_priority(Thread* thread, int priority) {
3609  if (!UseThreadPriorities) return OS_OK;
3610  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3611  return ret ? OS_OK : OS_ERR;
3612}
3613
3614OSReturn os::get_native_priority(const Thread* const thread,
3615                                 int* priority_ptr) {
3616  if (!UseThreadPriorities) {
3617    *priority_ptr = java_to_os_priority[NormPriority];
3618    return OS_OK;
3619  }
3620  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3621  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3622    assert(false, "GetThreadPriority failed");
3623    return OS_ERR;
3624  }
3625  *priority_ptr = os_prio;
3626  return OS_OK;
3627}
3628
3629
3630// Hint to the underlying OS that a task switch would not be good.
3631// Void return because it's a hint and can fail.
3632void os::hint_no_preempt() {}
3633
3634void os::interrupt(Thread* thread) {
3635  assert(!thread->is_Java_thread() || Thread::current() == thread ||
3636         Threads_lock->owned_by_self(),
3637         "possibility of dangling Thread pointer");
3638
3639  OSThread* osthread = thread->osthread();
3640  osthread->set_interrupted(true);
3641  // More than one thread can get here with the same value of osthread,
3642  // resulting in multiple notifications.  We do, however, want the store
3643  // to interrupted() to be visible to other threads before we post
3644  // the interrupt event.
3645  OrderAccess::release();
3646  SetEvent(osthread->interrupt_event());
3647  // For JSR166:  unpark after setting status
3648  if (thread->is_Java_thread()) {
3649    ((JavaThread*)thread)->parker()->unpark();
3650  }
3651
3652  ParkEvent * ev = thread->_ParkEvent;
3653  if (ev != NULL) ev->unpark();
3654}
3655
3656
3657bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3658  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3659         "possibility of dangling Thread pointer");
3660
3661  OSThread* osthread = thread->osthread();
3662  // There is no synchronization between the setting of the interrupt
3663  // and it being cleared here. It is critical - see 6535709 - that
3664  // we only clear the interrupt state, and reset the interrupt event,
3665  // if we are going to report that we were indeed interrupted - else
3666  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3667  // depending on the timing. We check the thread's interrupt event to see
3668  // whether it received a real interrupt, which prevents spurious wakeups.
3669  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3670  if (interrupted && clear_interrupted) {
3671    osthread->set_interrupted(false);
3672    ResetEvent(osthread->interrupt_event());
3673  } // Otherwise leave the interrupted state alone
3674
3675  return interrupted;
3676}
3677
3678// Gets a pc (hint) for a running thread. Currently used only for profiling.
3679ExtendedPC os::get_thread_pc(Thread* thread) {
3680  CONTEXT context;
3681  context.ContextFlags = CONTEXT_CONTROL;
3682  HANDLE handle = thread->osthread()->thread_handle();
3683#ifdef _M_IA64
3684  assert(0, "Fix get_thread_pc");
3685  return ExtendedPC(NULL);
3686#else
3687  if (GetThreadContext(handle, &context)) {
3688#ifdef _M_AMD64
3689    return ExtendedPC((address) context.Rip);
3690#else
3691    return ExtendedPC((address) context.Eip);
3692#endif
3693  } else {
3694    return ExtendedPC(NULL);
3695  }
3696#endif
3697}
3698
3699// GetCurrentThreadId() returns DWORD
3700intx os::current_thread_id()  { return GetCurrentThreadId(); }
3701
3702static int _initial_pid = 0;
3703
3704int os::current_process_id() {
3705  return (_initial_pid ? _initial_pid : _getpid());
3706}
3707
3708int    os::win32::_vm_page_size              = 0;
3709int    os::win32::_vm_allocation_granularity = 0;
3710int    os::win32::_processor_type            = 0;
3711// Processor level is not available on non-NT systems, use vm_version instead
3712int    os::win32::_processor_level           = 0;
3713julong os::win32::_physical_memory           = 0;
3714size_t os::win32::_default_stack_size        = 0;
3715
3716intx          os::win32::_os_thread_limit    = 0;
3717volatile intx os::win32::_os_thread_count    = 0;
3718
3719bool   os::win32::_is_windows_server         = false;
3720
3721// 6573254
3722// Currently, the bug is observed across all the supported Windows releases,
3723// including the latest one (as of this writing - Windows Server 2012 R2)
3724bool   os::win32::_has_exit_bug              = true;
3725
3726void os::win32::initialize_system_info() {
3727  SYSTEM_INFO si;
3728  GetSystemInfo(&si);
3729  _vm_page_size    = si.dwPageSize;
3730  _vm_allocation_granularity = si.dwAllocationGranularity;
3731  _processor_type  = si.dwProcessorType;
3732  _processor_level = si.wProcessorLevel;
3733  set_processor_count(si.dwNumberOfProcessors);
3734
3735  MEMORYSTATUSEX ms;
3736  ms.dwLength = sizeof(ms);
3737
3738  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3739  // dwMemoryLoad (% of memory in use)
3740  GlobalMemoryStatusEx(&ms);
3741  _physical_memory = ms.ullTotalPhys;
3742
3743  OSVERSIONINFOEX oi;
3744  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3745  GetVersionEx((OSVERSIONINFO*)&oi);
3746  switch (oi.dwPlatformId) {
3747  case VER_PLATFORM_WIN32_NT:
3748    {
3749      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3750      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3751          oi.wProductType == VER_NT_SERVER) {
3752        _is_windows_server = true;
3753      }
3754    }
3755    break;
3756  default: fatal("Unknown platform");
3757  }
3758
3759  _default_stack_size = os::current_stack_size();
3760  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3761  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3762         "stack size not a multiple of page size");
3763
3764  initialize_performance_counter();
3765}
3766
3767
3768HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3769                                      int ebuflen) {
3770  char path[MAX_PATH];
3771  DWORD size;
3772  DWORD pathLen = (DWORD)sizeof(path);
3773  HINSTANCE result = NULL;
3774
3775  // only allow library name without path component
3776  assert(strchr(name, '\\') == NULL, "path not allowed");
3777  assert(strchr(name, ':') == NULL, "path not allowed");
3778  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3779    jio_snprintf(ebuf, ebuflen,
3780                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3781    return NULL;
3782  }
3783
3784  // search system directory
3785  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3786    if (size >= pathLen) {
3787      return NULL; // truncated
3788    }
3789    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3790      return NULL; // truncated
3791    }
3792    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3793      return result;
3794    }
3795  }
3796
3797  // try Windows directory
3798  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3799    if (size >= pathLen) {
3800      return NULL; // truncated
3801    }
3802    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3803      return NULL; // truncated
3804    }
3805    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3806      return result;
3807    }
3808  }
3809
3810  jio_snprintf(ebuf, ebuflen,
3811               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3812  return NULL;
3813}
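// Example use (illustrative; "psapi.dll" is only a placeholder name):
//   char ebuf[1024];
//   HINSTANCE h = os::win32::load_Windows_dll("psapi.dll", ebuf, sizeof(ebuf));
//   if (h == NULL) { /* ebuf describes why the load failed */ }
// Only a bare library name is accepted; anything containing '\' or ':' is rejected
// up front, and the system directory and then the Windows directory are searched.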
3814
3815#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3816#define EXIT_TIMEOUT 300000 /* 5 minutes */
3817
3818static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3819  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3820  return TRUE;
3821}
3822
3823int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3824  // Basic approach:
3825  //  - Each exiting thread registers its intent to exit and then does so.
3826  //  - A thread trying to terminate the process must wait for all
3827  //    threads currently exiting to complete their exit.
3828
3829  if (os::win32::has_exit_bug()) {
3830    // The array holds handles of the threads that have started exiting by calling
3831    // _endthreadex().
3832    // Should be large enough to avoid blocking the exiting thread due to lack of
3833    // a free slot.
3834    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3835    static int handle_count = 0;
3836
3837    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3838    static CRITICAL_SECTION crit_sect;
3839    static volatile jint process_exiting = 0;
3840    int i, j;
3841    DWORD res;
3842    HANDLE hproc, hthr;
3843
3844    // The first thread that reached this point, initializes the critical section.
3845    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3846      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3847    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3848      if (what != EPT_THREAD) {
3849        // Atomically set process_exiting before the critical section
3850        // to increase the visibility between racing threads.
3851        Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
3852      }
3853      EnterCriticalSection(&crit_sect);
3854
3855      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3856        // Remove from the array those handles of the threads that have completed exiting.
3857        for (i = 0, j = 0; i < handle_count; ++i) {
3858          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3859          if (res == WAIT_TIMEOUT) {
3860            handles[j++] = handles[i];
3861          } else {
3862            if (res == WAIT_FAILED) {
3863              warning("WaitForSingleObject failed (%u) in %s: %d\n",
3864                      GetLastError(), __FILE__, __LINE__);
3865            }
3866            // Don't keep the handle, if we failed waiting for it.
3867            CloseHandle(handles[i]);
3868          }
3869        }
3870
3871        // If there's no free slot in the array of the kept handles, we'll have to
3872        // wait until at least one thread completes exiting.
3873        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3874          // Raise the priority of the oldest exiting thread to increase its chances
3875          // to complete sooner.
3876          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3877          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3878          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3879            i = (res - WAIT_OBJECT_0);
3880            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3881            for (; i < handle_count; ++i) {
3882              handles[i] = handles[i + 1];
3883            }
3884          } else {
3885            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3886                    (res == WAIT_FAILED ? "failed" : "timed out"),
3887                    GetLastError(), __FILE__, __LINE__);
3888            // Don't keep handles, if we failed waiting for them.
3889            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3890              CloseHandle(handles[i]);
3891            }
3892            handle_count = 0;
3893          }
3894        }
3895
3896        // Store a duplicate of the current thread handle in the array of handles.
3897        hproc = GetCurrentProcess();
3898        hthr = GetCurrentThread();
3899        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3900                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
3901          warning("DuplicateHandle failed (%u) in %s: %d\n",
3902                  GetLastError(), __FILE__, __LINE__);
3903        } else {
3904          ++handle_count;
3905        }
3906
3907        // The current exiting thread has stored its handle in the array, and now
3908        // should leave the critical section before calling _endthreadex().
3909
3910      } else if (what != EPT_THREAD && handle_count > 0) {
3911        jlong start_time, finish_time, timeout_left;
3912        // Before ending the process, make sure all the threads that had called
3913        // _endthreadex() completed.
3914
3915        // Set the priority level of the current thread to the same value as
3916        // the priority level of exiting threads.
3917        // This is to ensure it will be given a fair chance to execute if
3918        // the timeout expires.
3919        hthr = GetCurrentThread();
3920        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3921        start_time = os::javaTimeNanos();
3922        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3923        for (i = 0; ; ) {
3924          int portion_count = handle_count - i;
3925          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3926            portion_count = MAXIMUM_WAIT_OBJECTS;
3927          }
3928          for (j = 0; j < portion_count; ++j) {
3929            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3930          }
3931          timeout_left = (finish_time - start_time) / 1000000L;
3932          if (timeout_left < 0) {
3933            timeout_left = 0;
3934          }
3935          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3936          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3937            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3938                    (res == WAIT_FAILED ? "failed" : "timed out"),
3939                    GetLastError(), __FILE__, __LINE__);
3940            // Reset portion_count so we close the remaining
3941            // handles due to this error.
3942            portion_count = handle_count - i;
3943          }
3944          for (j = 0; j < portion_count; ++j) {
3945            CloseHandle(handles[i + j]);
3946          }
3947          if ((i += portion_count) >= handle_count) {
3948            break;
3949          }
3950          start_time = os::javaTimeNanos();
3951        }
3952        handle_count = 0;
3953      }
3954
3955      LeaveCriticalSection(&crit_sect);
3956    }
3957
3958    if (OrderAccess::load_acquire(&process_exiting) != 0 &&
3959        process_exiting != (jint)GetCurrentThreadId()) {
3960      // Some other thread is about to call exit(), so we
3961      // don't let the current thread proceed to exit() or _endthreadex()
3962      while (true) {
3963        SuspendThread(GetCurrentThread());
3964        // Avoid busy-wait loop, if SuspendThread() failed.
3965        Sleep(EXIT_TIMEOUT);
3966      }
3967    }
3968  }
3969
3970  // We are here if either
3971  // - there's no 'race at exit' bug on this OS release;
3972  // - initialization of the critical section failed (unlikely);
3973  // - the current thread has stored its handle and left the critical section;
3974  // - the process-exiting thread has raised the flag and left the critical section.
3975  if (what == EPT_THREAD) {
3976    _endthreadex((unsigned)exit_code);
3977  } else if (what == EPT_PROCESS) {
3978    ::exit(exit_code);
3979  } else {
3980    _exit(exit_code);
3981  }
3982
3983  // Should not reach here
3984  return exit_code;
3985}
3986
3987#undef EXIT_TIMEOUT
3988
3989void os::win32::setmode_streams() {
3990  _setmode(_fileno(stdin), _O_BINARY);
3991  _setmode(_fileno(stdout), _O_BINARY);
3992  _setmode(_fileno(stderr), _O_BINARY);
3993}
3994
3995
3996bool os::is_debugger_attached() {
3997  return IsDebuggerPresent() ? true : false;
3998}
3999
4000
4001void os::wait_for_keypress_at_exit(void) {
4002  if (PauseAtExit) {
4003    fprintf(stderr, "Press any key to continue...\n");
4004    fgetc(stdin);
4005  }
4006}
4007
4008
4009bool os::message_box(const char* title, const char* message) {
4010  int result = MessageBox(NULL, message, title,
4011                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4012  return result == IDYES;
4013}
4014
4015#ifndef PRODUCT
4016#ifndef _WIN64
4017// Helpers to check whether NX protection is enabled
4018int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4019  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4020      pex->ExceptionRecord->NumberParameters > 0 &&
4021      pex->ExceptionRecord->ExceptionInformation[0] ==
4022      EXCEPTION_INFO_EXEC_VIOLATION) {
4023    return EXCEPTION_EXECUTE_HANDLER;
4024  }
4025  return EXCEPTION_CONTINUE_SEARCH;
4026}
4027
4028void nx_check_protection() {
4029  // If NX is enabled we'll get an exception calling into code on the stack
4030  char code[] = { (char)0xC3 }; // ret
4031  void *code_ptr = (void *)code;
4032  __try {
4033    __asm call code_ptr
4034  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4035    tty->print_raw_cr("NX protection detected.");
4036  }
4037}
4038#endif // _WIN64
4039#endif // PRODUCT
4040
4041// This is called _before_ the global arguments have been parsed
4042void os::init(void) {
4043  _initial_pid = _getpid();
4044
4045  init_random(1234567);
4046
4047  win32::initialize_system_info();
4048  win32::setmode_streams();
4049  init_page_sizes((size_t) win32::vm_page_size());
4050
4051  // This may be overridden later when argument processing is done.
4052  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4053
4054  // Initialize main_process and main_thread
4055  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4056  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4057                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4058    fatal("DuplicateHandle failed\n");
4059  }
4060  main_thread_id = (int) GetCurrentThreadId();
4061
4062  // initialize fast thread access - only used for 32-bit
4063  win32::initialize_thread_ptr_offset();
4064}
4065
4066// To install functions for atexit processing
4067extern "C" {
4068  static void perfMemory_exit_helper() {
4069    perfMemory_exit();
4070  }
4071}
4072
4073static jint initSock();
4074
4075// this is called _after_ the global arguments have been parsed
4076jint os::init_2(void) {
4077  // Allocate a single page and mark it as readable for safepoint polling
4078  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4079  guarantee(polling_page != NULL, "Reserve Failed for polling page");
4080
4081  address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4082  guarantee(return_page != NULL, "Commit Failed for polling page");
4083
4084  os::set_polling_page(polling_page);
4085
4086#ifndef PRODUCT
4087  if (Verbose && PrintMiscellaneous) {
4088    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
4089               (intptr_t)polling_page);
4090  }
4091#endif
4092
4093  if (!UseMembar) {
4094    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4095    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4096
4097    return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4098    guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4099
4100    os::set_memory_serialize_page(mem_serialize_page);
4101
4102#ifndef PRODUCT
4103    if (Verbose && PrintMiscellaneous) {
4104      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
4105                 (intptr_t)mem_serialize_page);
4106    }
4107#endif
4108  }
4109
4110  // Setup Windows Exceptions
4111
4112  // for debugging float code generation bugs
4113  if (ForceFloatExceptions) {
4114#ifndef  _WIN64
4115    static long fp_control_word = 0;
4116    __asm { fstcw fp_control_word }
4117    // see Intel PPro Manual, Vol. 2, p 7-16
4118    const long precision = 0x20;
4119    const long underflow = 0x10;
4120    const long overflow  = 0x08;
4121    const long zero_div  = 0x04;
4122    const long denorm    = 0x02;
4123    const long invalid   = 0x01;
4124    fp_control_word |= invalid;
4125    __asm { fldcw fp_control_word }
4126#endif
4127  }
4128
4129  // If stack_commit_size is 0, windows will reserve the default size,
4130  // but only commit a small portion of it.
4131  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4132  size_t default_reserve_size = os::win32::default_stack_size();
4133  size_t actual_reserve_size = stack_commit_size;
4134  if (stack_commit_size < default_reserve_size) {
4135    // If stack_commit_size == 0, we want this too
4136    actual_reserve_size = default_reserve_size;
4137  }
4138
4139  // Check minimum allowable stack size for thread creation and to initialize
4140  // the java system classes, including StackOverflowError - depends on page
4141  // size.  Add a page for compiler2 recursion in main thread.
4142  // Add in 2*BytesPerWord times page size to account for VM stack during
4143  // class initialization depending on 32 or 64 bit VM.
4144  size_t min_stack_allowed =
4145            (size_t)(JavaThread::stack_yellow_zone_size() + JavaThread::stack_red_zone_size() +
4146                     JavaThread::stack_shadow_zone_size() +
4147                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size());
4148  if (actual_reserve_size < min_stack_allowed) {
4149    tty->print_cr("\nThe stack size specified is too small. "
4150                  "Specify at least %dk",
4151                  min_stack_allowed / K);
4152    return JNI_ERR;
4153  }
4154
4155  JavaThread::set_stack_size_at_create(stack_commit_size);
4156
4157  // Calculate theoretical max. size of Threads to guard against artificial
4158  // out-of-memory situations, where all available address-space has been
4159  // reserved by thread stacks.
4160  assert(actual_reserve_size != 0, "Must have a stack");
4161
4162  // Calculate the thread limit when we should start doing Virtual Memory
4163  // banging. Currently when the threads will have used all but 200Mb of space.
4164  //
4165  // TODO: consider performing a similar calculation for commit size instead
4166  // of reserve size, since on a 64-bit platform we'll run into that more
4167  // often than running out of virtual memory space.  We can use the
4168  // lower value of the two calculations as the os_thread_limit.
4169  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4170  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4171
4172  // at exit methods are called in the reverse order of their registration.
4173  // there is no limit to the number of functions registered. atexit does
4174  // not set errno.
4175
4176  if (PerfAllowAtExitRegistration) {
4177    // only register atexit functions if PerfAllowAtExitRegistration is set.
4178    // atexit functions can be delayed until process exit time, which
4179    // can be problematic for embedded VM situations. Embedded VMs should
4180    // call DestroyJavaVM() to assure that VM resources are released.
4181
4182    // note: perfMemory_exit_helper atexit function may be removed in
4183    // the future if the appropriate cleanup code can be added to the
4184    // VM_Exit VMOperation's doit method.
4185    if (atexit(perfMemory_exit_helper) != 0) {
4186      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4187    }
4188  }
4189
4190#ifndef _WIN64
4191  // Print something if NX is enabled (win32 on AMD64)
4192  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4193#endif
4194
4195  // initialize thread priority policy
4196  prio_init();
4197
4198  if (UseNUMA && !ForceNUMA) {
4199    UseNUMA = false; // We don't fully support this yet
4200  }
4201
4202  if (UseNUMAInterleaving) {
4203    // first check whether this Windows OS supports VirtualAllocExNuma; if not, ignore this flag
4204    bool success = numa_interleaving_init();
4205    if (!success) UseNUMAInterleaving = false;
4206  }
4207
4208  if (initSock() != JNI_OK) {
4209    return JNI_ERR;
4210  }
4211
4212  return JNI_OK;
4213}
4214
4215// Mark the polling page as unreadable
4216void os::make_polling_page_unreadable(void) {
4217  DWORD old_status;
4218  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4219                      PAGE_NOACCESS, &old_status)) {
4220    fatal("Could not disable polling page");
4221  }
4222}
4223
4224// Mark the polling page as readable
4225void os::make_polling_page_readable(void) {
4226  DWORD old_status;
4227  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4228                      PAGE_READONLY, &old_status)) {
4229    fatal("Could not enable polling page");
4230  }
4231}
4232
4233
4234int os::stat(const char *path, struct stat *sbuf) {
4235  char pathbuf[MAX_PATH];
4236  if (strlen(path) > MAX_PATH - 1) {
4237    errno = ENAMETOOLONG;
4238    return -1;
4239  }
4240  os::native_path(strcpy(pathbuf, path));
4241  int ret = ::stat(pathbuf, sbuf);
4242  if (sbuf != NULL && UseUTCFileTimestamp) {
4243    // Fix for 6539723.  st_mtime returned from stat() is dependent on
4244    // the system timezone and so can return different values for the
4245    // same file if/when daylight savings time changes.  This adjustment
4246    // makes sure the same timestamp is returned regardless of the TZ.
4247    //
4248    // See:
4249    // http://msdn.microsoft.com/library/
4250    //   default.asp?url=/library/en-us/sysinfo/base/
4251    //   time_zone_information_str.asp
4252    // and
4253    // http://msdn.microsoft.com/library/default.asp?url=
4254    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4255    //
4256    // NOTE: there is an insidious bug here:  If the timezone is changed
4257    // after the call to stat() but before 'GetTimeZoneInformation()', then
4258    // the adjustment we do here will be wrong and we'll return the wrong
4259    // value (which will likely end up creating an invalid class data
4260    // archive).  Absent a better API for this, or some time zone locking
4261    // mechanism, we'll have to live with this risk.
4262    TIME_ZONE_INFORMATION tz;
4263    DWORD tzid = GetTimeZoneInformation(&tz);
4264    int daylightBias =
4265      (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4266    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4267  }
4268  return ret;
4269}
4270
4271
4272#define FT2INT64(ft) \
4273  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
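// FT2INT64 folds a FILETIME (a 64-bit count of 100-nanosecond units) into a jlong;
// the callers below multiply the result by 100 to report nanoseconds.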
4274
4275
4276// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4277// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4278// of a thread.
4279//
4280// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4281// the fast estimate available on the platform.
4282
4283// current_thread_cpu_time() is not optimized for Windows yet
4284jlong os::current_thread_cpu_time() {
4285  // return user + sys since the cost is the same
4286  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4287}
4288
4289jlong os::thread_cpu_time(Thread* thread) {
4290  // consistent with what current_thread_cpu_time() returns.
4291  return os::thread_cpu_time(thread, true /* user+sys */);
4292}
4293
4294jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4295  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4296}
4297
4298jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4299  // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4300  // If this function changes, os::is_thread_cpu_time_supported() should too
4301  FILETIME CreationTime;
4302  FILETIME ExitTime;
4303  FILETIME KernelTime;
4304  FILETIME UserTime;
4305
4306  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4307                      &ExitTime, &KernelTime, &UserTime) == 0) {
4308    return -1;
4309  } else if (user_sys_cpu_time) {
4310    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4311  } else {
4312    return FT2INT64(UserTime) * 100;
4313  }
4314}
4315
4316void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4317  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4318  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4319  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4320  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4321}
4322
4323void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4324  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4325  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4326  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4327  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4328}
4329
4330bool os::is_thread_cpu_time_supported() {
4331  // see os::thread_cpu_time
4332  FILETIME CreationTime;
4333  FILETIME ExitTime;
4334  FILETIME KernelTime;
4335  FILETIME UserTime;
4336
4337  if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4338                      &KernelTime, &UserTime) == 0) {
4339    return false;
4340  } else {
4341    return true;
4342  }
4343}
4344
4345// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4346// It does have primitives (PDH API) to get CPU usage and run queue length.
4347// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4348// If we wanted to implement loadavg on Windows, we have a few options:
4349//
4350// a) Query CPU usage and run queue length and "fake" an answer by
4351//    returning the CPU usage if it's under 100%, and the run queue
4352//    length otherwise.  It turns out that querying is pretty slow
4353//    on Windows, on the order of 200 microseconds on a fast machine.
4354//    Note that on Windows the CPU usage value is the % usage
4355//    since the last time the API was called (and the first call
4356//    returns 100%), so we'd have to deal with that as well.
4357//
4358// b) Sample the "fake" answer using a sampling thread and store
4359//    the answer in a global variable.  The call to loadavg would
4360//    just return the value of the global, avoiding the slow query.
4361//
4362// c) Sample a better answer using exponential decay to smooth the
4363//    value.  This is basically the algorithm used by UNIX kernels.
4364//
4365// Note that sampling thread starvation could affect both (b) and (c).
4366int os::loadavg(double loadavg[], int nelem) {
4367  return -1;
4368}
4369
4370
4371// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4372bool os::dont_yield() {
4373  return DontYieldALot;
4374}
4375
4376// This method is a slightly reworked copy of JDK's sysOpen
4377// from src/windows/hpi/src/sys_api_md.c
4378
4379int os::open(const char *path, int oflag, int mode) {
4380  char pathbuf[MAX_PATH];
4381
4382  if (strlen(path) > MAX_PATH - 1) {
4383    errno = ENAMETOOLONG;
4384    return -1;
4385  }
4386  os::native_path(strcpy(pathbuf, path));
4387  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4388}
4389
4390FILE* os::open(int fd, const char* mode) {
4391  return ::_fdopen(fd, mode);
4392}
4393
4394// Is a (classpath) directory empty?
4395bool os::dir_is_empty(const char* path) {
4396  WIN32_FIND_DATA fd;
4397  HANDLE f = FindFirstFile(path, &fd);
4398  if (f == INVALID_HANDLE_VALUE) {
4399    return true;
4400  }
4401  FindClose(f);
4402  return false;
4403}
4404
4405// create binary file, rewriting existing file if required
4406int os::create_binary_file(const char* path, bool rewrite_existing) {
4407  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4408  if (!rewrite_existing) {
4409    oflags |= _O_EXCL;
4410  }
4411  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4412}
4413
4414// return current position of file pointer
4415jlong os::current_file_offset(int fd) {
4416  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4417}
4418
4419// move file pointer to the specified offset
4420jlong os::seek_to_file_offset(int fd, jlong offset) {
4421  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4422}
4423
4424
4425jlong os::lseek(int fd, jlong offset, int whence) {
4426  return (jlong) ::_lseeki64(fd, offset, whence);
4427}
4428
4429size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4430  OVERLAPPED ov;
4431  DWORD nread;
4432  BOOL result;
4433
4434  ZeroMemory(&ov, sizeof(ov));
4435  ov.Offset = (DWORD)offset;
4436  ov.OffsetHigh = (DWORD)(offset >> 32);
4437
4438  HANDLE h = (HANDLE)::_get_osfhandle(fd);
4439
4440  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4441
4442  return result ? nread : 0;
4443}
4444
4445
4446// This method is a slightly reworked copy of JDK's sysNativePath
4447// from src/windows/hpi/src/path_md.c
4448
4449// Convert a pathname to native format.  On win32, this involves forcing all
4450// separators to be '\\' rather than '/' (both are legal inputs, but Win95
4451// sometimes rejects '/') and removing redundant separators.  The input path is
4452// assumed to have been converted into the character encoding used by the local
4453// system.  Because this might be a double-byte encoding, care is taken to
4454// treat double-byte lead characters correctly.
4455//
4456// This procedure modifies the given path in place, as the result is never
4457// longer than the original.  There is no error return; this operation always
4458// succeeds.
4459char * os::native_path(char *path) {
4460  char *src = path, *dst = path, *end = path;
4461  char *colon = NULL;  // If a drive specifier is found, this will
4462                       // point to the colon following the drive letter
4463
4464  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4465  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4466          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4467
4468  // Check for leading separators
4469#define isfilesep(c) ((c) == '/' || (c) == '\\')
4470  while (isfilesep(*src)) {
4471    src++;
4472  }
4473
4474  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4475    // Remove leading separators if followed by drive specifier.  This
4476    // hack is necessary to support file URLs containing drive
4477    // specifiers (e.g., "file://c:/path").  As a side effect,
4478    // "/c:/path" can be used as an alternative to "c:/path".
4479    *dst++ = *src++;
4480    colon = dst;
4481    *dst++ = ':';
4482    src++;
4483  } else {
4484    src = path;
4485    if (isfilesep(src[0]) && isfilesep(src[1])) {
4486      // UNC pathname: Retain first separator; leave src pointed at
4487      // second separator so that further separators will be collapsed
4488      // into the second separator.  The result will be a pathname
4489      // beginning with "\\\\" followed (most likely) by a host name.
4490      src = dst = path + 1;
4491      path[0] = '\\';     // Force first separator to '\\'
4492    }
4493  }
4494
4495  end = dst;
4496
4497  // Remove redundant separators from remainder of path, forcing all
4498  // separators to be '\\' rather than '/'. Also, single byte space
4499  // characters are removed from the end of the path because those
4500  // are not legal ending characters on this operating system.
4501  //
4502  while (*src != '\0') {
4503    if (isfilesep(*src)) {
4504      *dst++ = '\\'; src++;
4505      while (isfilesep(*src)) src++;
4506      if (*src == '\0') {
4507        // Check for trailing separator
4508        end = dst;
4509        if (colon == dst - 2) break;  // "z:\\"
4510        if (dst == path + 1) break;   // "\\"
4511        if (dst == path + 2 && isfilesep(path[0])) {
4512          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4513          // beginning of a UNC pathname.  Even though it is not, by
4514          // itself, a valid UNC pathname, we leave it as is in order
4515          // to be consistent with the path canonicalizer as well
4516          // as the win32 APIs, which treat this case as an invalid
4517          // UNC pathname rather than as an alias for the root
4518          // directory of the current drive.
4519          break;
4520        }
4521        end = --dst;  // Path does not denote a root directory, so
4522                      // remove trailing separator
4523        break;
4524      }
4525      end = dst;
4526    } else {
4527      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4528        *dst++ = *src++;
4529        if (*src) *dst++ = *src++;
4530        end = dst;
4531      } else {  // Copy a single-byte character
4532        char c = *src++;
4533        *dst++ = c;
4534        // Space is not a legal ending character
4535        if (c != ' ') end = dst;
4536      }
4537    }
4538  }
4539
4540  *end = '\0';
4541
4542  // For "z:", add "." to work around a bug in the C runtime library
4543  if (colon == dst - 1) {
4544    path[2] = '.';
4545    path[3] = '\0';
4546  }
4547
4548  return path;
4549}
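// A few illustrative conversions performed by native_path() above, derived
// from the rules described in the preceding comments:
//
//   "/c:/path//to/file"  ->  "c:\path\to\file"   (leading separator dropped before a drive)
//   "a/b//c/"            ->  "a\b\c"             (separators collapsed, trailing one removed)
//   "//server/share/"    ->  "\\server\share"    (UNC prefix preserved)
//   "z:"                 ->  "z:."               (C runtime workaround noted above)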
4550
4551// This code is a copy of JDK's sysSetLength
4552// from src/windows/hpi/src/sys_api_md.c
4553
4554int os::ftruncate(int fd, jlong length) {
4555  HANDLE h = (HANDLE)::_get_osfhandle(fd);
4556  long high = (long)(length >> 32);
4557  DWORD ret;
4558
4559  if (h == (HANDLE)(-1)) {
4560    return -1;
4561  }
4562
4563  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4564  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4565    return -1;
4566  }
4567
4568  if (::SetEndOfFile(h) == FALSE) {
4569    return -1;
4570  }
4571
4572  return 0;
4573}
4574
4575
4576// This code is a copy of JDK's sysSync
4577// from src/windows/hpi/src/sys_api_md.c
4578// except for the legacy workaround for a bug in Win 98
4579
4580int os::fsync(int fd) {
4581  HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4582
4583  if ((!::FlushFileBuffers(handle)) &&
4584      (GetLastError() != ERROR_ACCESS_DENIED)) {
4585    // from winerror.h
4586    return -1;
4587  }
4588  return 0;
4589}
4590
4591static int nonSeekAvailable(int, long *);
4592static int stdinAvailable(int, long *);
4593
4594#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4595#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4596
4597// This code is a copy of JDK's sysAvailable
4598// from src/windows/hpi/src/sys_api_md.c
4599
4600int os::available(int fd, jlong *bytes) {
4601  jlong cur, end;
4602  struct _stati64 stbuf64;
4603
4604  if (::_fstati64(fd, &stbuf64) >= 0) {
4605    int mode = stbuf64.st_mode;
4606    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4607      int ret;
4608      long lpbytes;
4609      if (fd == 0) {
4610        ret = stdinAvailable(fd, &lpbytes);
4611      } else {
4612        ret = nonSeekAvailable(fd, &lpbytes);
4613      }
4614      (*bytes) = (jlong)(lpbytes);
4615      return ret;
4616    }
4617    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4618      return FALSE;
4619    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4620      return FALSE;
4621    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4622      return FALSE;
4623    }
4624    *bytes = end - cur;
4625    return TRUE;
4626  } else {
4627    return FALSE;
4628  }
4629}
4630
4631// This code is a copy of JDK's nonSeekAvailable
4632// from src/windows/hpi/src/sys_api_md.c
4633
4634static int nonSeekAvailable(int fd, long *pbytes) {
4635  // This is used for available on non-seekable devices
4636  // (like both named and anonymous pipes, such as pipes
4637  //  connected to an exec'd process).
4638  // Standard Input is a special case.
4639  HANDLE han;
4640
4641  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4642    return FALSE;
4643  }
4644
4645  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4646    // PeekNamedPipe fails when at EOF.  In that case we
4647    // simply make *pbytes = 0 which is consistent with the
4648    // behavior we get on Solaris when an fd is at EOF.
4649    // The only alternative is to raise an Exception,
4650    // which isn't really warranted.
4651    //
4652    if (::GetLastError() != ERROR_BROKEN_PIPE) {
4653      return FALSE;
4654    }
4655    *pbytes = 0;
4656  }
4657  return TRUE;
4658}
4659
4660#define MAX_INPUT_EVENTS 2000
4661
4662// This code is a copy of JDK's stdinAvailable
4663// from src/windows/hpi/src/sys_api_md.c
4664
4665static int stdinAvailable(int fd, long *pbytes) {
4666  HANDLE han;
4667  DWORD numEventsRead = 0;  // Number of events read from buffer
4668  DWORD numEvents = 0;      // Number of events in buffer
4669  DWORD i = 0;              // Loop index
4670  DWORD curLength = 0;      // Position marker
4671  DWORD actualLength = 0;   // Number of bytes readable
4672  BOOL error = FALSE;       // Error holder
4673  INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4674
4675  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4676    return FALSE;
4677  }
4678
4679  // Construct an array of input records in the console buffer
4680  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4681  if (error == 0) {
4682    return nonSeekAvailable(fd, pbytes);
4683  }
4684
4685  // lpBuffer must fit into 64K or else PeekConsoleInput fails
4686  if (numEvents > MAX_INPUT_EVENTS) {
4687    numEvents = MAX_INPUT_EVENTS;
4688  }
4689
4690  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4691  if (lpBuffer == NULL) {
4692    return FALSE;
4693  }
4694
4695  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4696  if (error == 0) {
4697    os::free(lpBuffer);
4698    return FALSE;
4699  }
4700
4701  // Examine input records for the number of bytes available
4702  for (i=0; i<numEvents; i++) {
4703    if (lpBuffer[i].EventType == KEY_EVENT) {
4704
4705      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4706                                      &(lpBuffer[i].Event);
4707      if (keyRecord->bKeyDown == TRUE) {
4708        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4709        curLength++;
4710        if (*keyPressed == '\r') {
4711          actualLength = curLength;
4712        }
4713      }
4714    }
4715  }
4716
4717  if (lpBuffer != NULL) {
4718    os::free(lpBuffer);
4719  }
4720
4721  *pbytes = (long) actualLength;
4722  return TRUE;
4723}
4724
4725// Map a block of memory.
4726char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4727                        char *addr, size_t bytes, bool read_only,
4728                        bool allow_exec) {
4729  HANDLE hFile;
4730  char* base;
4731
4732  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4733                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4734  if (hFile == INVALID_HANDLE_VALUE) {
4735    if (PrintMiscellaneous && Verbose) {
4736      DWORD err = GetLastError();
4737      tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
4738    }
4739    return NULL;
4740  }
4741
4742  if (allow_exec) {
4743    // CreateFileMapping/MapViewOfFileEx can't map executable memory
4744    // unless it comes from a PE image (which the shared archive is not.)
4745    // Even VirtualProtect refuses to give execute access to mapped memory
4746    // that was not previously executable.
4747    //
4748    // Instead, stick the executable region in anonymous memory.  Yuck.
4749    // Penalty is that ~4 pages will not be shareable - in the future
4750    // we might consider DLLizing the shared archive with a proper PE
4751    // header so that mapping executable + sharing is possible.
4752
4753    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4754                                PAGE_READWRITE);
4755    if (base == NULL) {
4756      if (PrintMiscellaneous && Verbose) {
4757        DWORD err = GetLastError();
4758        tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
4759      }
4760      CloseHandle(hFile);
4761      return NULL;
4762    }
4763
4764    DWORD bytes_read;
4765    OVERLAPPED overlapped;
4766    overlapped.Offset = (DWORD)file_offset;
4767    overlapped.OffsetHigh = 0;
4768    overlapped.hEvent = NULL;
4769    // ReadFile guarantees that if the return value is true, the requested
4770    // number of bytes were read before returning.
4771    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4772    if (!res) {
4773      if (PrintMiscellaneous && Verbose) {
4774        DWORD err = GetLastError();
4775        tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
4776      }
4777      release_memory(base, bytes);
4778      CloseHandle(hFile);
4779      return NULL;
4780    }
4781  } else {
4782    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4783                                    NULL /* file_name */);
4784    if (hMap == NULL) {
4785      if (PrintMiscellaneous && Verbose) {
4786        DWORD err = GetLastError();
4787        tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
4788      }
4789      CloseHandle(hFile);
4790      return NULL;
4791    }
4792
4793    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4794    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4795                                  (DWORD)bytes, addr);
4796    if (base == NULL) {
4797      if (PrintMiscellaneous && Verbose) {
4798        DWORD err = GetLastError();
4799        tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
4800      }
4801      CloseHandle(hMap);
4802      CloseHandle(hFile);
4803      return NULL;
4804    }
4805
4806    if (CloseHandle(hMap) == 0) {
4807      if (PrintMiscellaneous && Verbose) {
4808        DWORD err = GetLastError();
4809        tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
4810      }
4811      CloseHandle(hFile);
4812      return base;
4813    }
4814  }
4815
4816  if (allow_exec) {
4817    DWORD old_protect;
4818    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4819    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4820
4821    if (!res) {
4822      if (PrintMiscellaneous && Verbose) {
4823        DWORD err = GetLastError();
4824        tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
4825      }
4826      // Don't consider this a hard error, on IA32 even if the
4827      // VirtualProtect fails, we should still be able to execute
4828      CloseHandle(hFile);
4829      return base;
4830    }
4831  }
4832
4833  if (CloseHandle(hFile) == 0) {
4834    if (PrintMiscellaneous && Verbose) {
4835      DWORD err = GetLastError();
4836      tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
4837    }
4838    return base;
4839  }
4840
4841  return base;
4842}
4843
4844
4845// Remap a block of memory.
4846char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4847                          char *addr, size_t bytes, bool read_only,
4848                          bool allow_exec) {
4849  // This OS does not allow existing memory maps to be remapped so we
4850  // have to unmap the memory before we remap it.
4851  if (!os::unmap_memory(addr, bytes)) {
4852    return NULL;
4853  }
4854
4855  // There is a very small theoretical window between the unmap_memory()
4856  // call above and the map_memory() call below where a thread in native
4857  // code may be able to access an address that is no longer mapped.
4858
4859  return os::map_memory(fd, file_name, file_offset, addr, bytes,
4860                        read_only, allow_exec);
4861}
4862
4863
4864// Unmap a block of memory.
4865// Returns true=success, otherwise false.
4866
4867bool os::pd_unmap_memory(char* addr, size_t bytes) {
4868  MEMORY_BASIC_INFORMATION mem_info;
4869  if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4870    if (PrintMiscellaneous && Verbose) {
4871      DWORD err = GetLastError();
4872      tty->print_cr("VirtualQuery() failed: GetLastError->%ld.", err);
4873    }
4874    return false;
4875  }
4876
4877  // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4878  // Instead, the executable region was allocated using VirtualAlloc(). See
4879  // pd_map_memory() above.
4880  //
4881  // The following flags should match the 'exec_access' flags used for
4882  // VirtualProtect() in pd_map_memory().
4883  if (mem_info.Protect == PAGE_EXECUTE_READ ||
4884      mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4885    return pd_release_memory(addr, bytes);
4886  }
4887
4888  BOOL result = UnmapViewOfFile(addr);
4889  if (result == 0) {
4890    if (PrintMiscellaneous && Verbose) {
4891      DWORD err = GetLastError();
4892      tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err);
4893    }
4894    return false;
4895  }
4896  return true;
4897}
4898
4899void os::pause() {
4900  char filename[MAX_PATH];
4901  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4902    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4903  } else {
4904    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4905  }
4906
4907  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4908  if (fd != -1) {
4909    struct stat buf;
4910    ::close(fd);
4911    while (::stat(filename, &buf) == 0) {
4912      Sleep(100);
4913    }
4914  } else {
4915    jio_fprintf(stderr,
4916                "Could not open pause file '%s', continuing immediately.\n", filename);
4917  }
4918}
4919
4920os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4921  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4922}
4923
4924// See the caveats for this class in os_windows.hpp
4925// Protects the callback call so that a raised OS EXCEPTION causes a jump back
4926// into this method, which then returns false. If no OS EXCEPTION was raised,
4927// it returns true.
4928// The callback is supposed to provide the method that should be protected.
4929//
4930bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4931  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4932  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4933         "crash_protection already set?");
4934
4935  bool success = true;
4936  __try {
4937    WatcherThread::watcher_thread()->set_crash_protection(this);
4938    cb.call();
4939  } __except(EXCEPTION_EXECUTE_HANDLER) {
4940    // only for protection, nothing to do
4941    success = false;
4942  }
4943  WatcherThread::watcher_thread()->set_crash_protection(NULL);
4944  return success;
4945}
4946
4947// An Event wraps a win32 "CreateEvent" kernel handle.
4948//
4949// We have a number of choices regarding "CreateEvent" win32 handle leakage:
4950//
4951// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4952//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
4953//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4954//     In addition, an unpark() operation might fetch the handle field, but the
4955//     event could recycle between the fetch and the SetEvent() operation.
4956//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
4957//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
4958//     on a stale but recycled handle would be harmless, but in practice this might
4959//     confuse other non-Sun code, so it's not a viable approach.
4960//
4961// 2:  Once a win32 event handle is associated with an Event, it remains associated
4962//     with the Event.  The event handle is never closed.  This could be construed
4963//     as handle leakage, but only up to the maximum # of threads that have been extant
4964//     at any one time.  This shouldn't be an issue, as windows platforms typically
4965//     permit a process to have hundreds of thousands of open handles.
4966//
4967// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
4968//     and release unused handles.
4969//
4970// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
4971//     It's not clear, however, that we wouldn't be trading one type of leak for another.
4972//
4973// 5.  Use an RCU-like mechanism (Read-Copy Update).
4974//     Or perhaps something similar to Maged Michael's "Hazard pointers".
4975//
4976// We use (2).
4977//
4978// TODO-FIXME:
4979// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
4980// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
4981//     to recover from (or at least detect) the dreaded Windows 841176 bug.
4982// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
4983//     into a single win32 CreateEvent() handle.
4984//
4985// Assumption:
4986//    Only one parker can exist on an event, which is why we allocate
4987//    them per-thread. Multiple unparkers can coexist.
4988//
4989// _Event transitions in park()
4990//   -1 => -1 : illegal
4991//    1 =>  0 : pass - return immediately
4992//    0 => -1 : block; then set _Event to 0 before returning
4993//
4994// _Event transitions in unpark()
4995//    0 => 1 : just return
4996//    1 => 1 : just return
4997//   -1 => either 0 or 1; must signal target thread
4998//         That is, we can safely transition _Event from -1 to either
4999//         0 or 1.
5000//
5001// _Event serves as a restricted-range semaphore.
5002//   -1 : thread is blocked, i.e. there is a waiter
5003//    0 : neutral: thread is running or ready,
5004//        could have been signaled after a wait started
5005//    1 : signaled - thread is running or ready
5006//
5007// Another possible encoding of _Event would be with
5008// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5009//
5010
5011int os::PlatformEvent::park(jlong Millis) {
5012  // Transitions for _Event:
5013  //   -1 => -1 : illegal
5014  //    1 =>  0 : pass - return immediately
5015  //    0 => -1 : block; then set _Event to 0 before returning
5016
5017  guarantee(_ParkHandle != NULL , "Invariant");
5018  guarantee(Millis > 0          , "Invariant");
5019
5020  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5021  // the initial park() operation.
5022  // Consider: use atomic decrement instead of CAS-loop
5023
5024  int v;
5025  for (;;) {
5026    v = _Event;
5027    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5028  }
5029  guarantee((v == 0) || (v == 1), "invariant");
5030  if (v != 0) return OS_OK;
5031
5032  // Do this the hard way by blocking ...
5033  // TODO: consider a brief spin here, gated on the success of recent
5034  // spin attempts by this thread.
5035  //
5036  // We decompose long timeouts into series of shorter timed waits.
5037  // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5038  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5039  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5040  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5041  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5042  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5043  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5044  // for the already waited time.  This policy does not admit any new outcomes.
5045  // In the future, however, we might want to track the accumulated wait time and
5046  // adjust Millis accordingly if we encounter a spurious wakeup.
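  //
  // For example, with MAXTIMEOUT == 0x10000000 (below), a caller passing
  // Millis == 0x28000000 makes at most two full 0x10000000 ms waits followed
  // by one 0x08000000 ms wait, and leaves the wait loop early if an unpark()
  // drives _Event non-negative in the meantime.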
5047
5048  const int MAXTIMEOUT = 0x10000000;
5049  DWORD rv = WAIT_TIMEOUT;
5050  while (_Event < 0 && Millis > 0) {
5051    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5052    if (Millis > MAXTIMEOUT) {
5053      prd = MAXTIMEOUT;
5054    }
5055    rv = ::WaitForSingleObject(_ParkHandle, prd);
5056    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5057    if (rv == WAIT_TIMEOUT) {
5058      Millis -= prd;
5059    }
5060  }
5061  v = _Event;
5062  _Event = 0;
5063  // see comment at end of os::PlatformEvent::park() below:
5064  OrderAccess::fence();
5065  // If we encounter a nearly simultaneous timeout expiry and unpark()
5066  // we return OS_OK indicating we awoke via unpark().
5067  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5068  return (v >= 0) ? OS_OK : OS_TIMEOUT;
5069}
5070
5071void os::PlatformEvent::park() {
5072  // Transitions for _Event:
5073  //   -1 => -1 : illegal
5074  //    1 =>  0 : pass - return immediately
5075  //    0 => -1 : block; then set _Event to 0 before returning
5076
5077  guarantee(_ParkHandle != NULL, "Invariant");
5078  // Invariant: Only the thread associated with the Event/PlatformEvent
5079  // may call park().
5080  // Consider: use atomic decrement instead of CAS-loop
5081  int v;
5082  for (;;) {
5083    v = _Event;
5084    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5085  }
5086  guarantee((v == 0) || (v == 1), "invariant");
5087  if (v != 0) return;
5088
5089  // Do this the hard way by blocking ...
5090  // TODO: consider a brief spin here, gated on the success of recent
5091  // spin attempts by this thread.
5092  while (_Event < 0) {
5093    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5094    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5095  }
5096
5097  // Usually we'll find _Event == 0 at this point, but as
5098  // an optional optimization we clear it, just in case multiple
5099  // unpark() operations drove _Event up to 1.
5100  _Event = 0;
5101  OrderAccess::fence();
5102  guarantee(_Event >= 0, "invariant");
5103}
5104
5105void os::PlatformEvent::unpark() {
5106  guarantee(_ParkHandle != NULL, "Invariant");
5107
5108  // Transitions for _Event:
5109  //    0 => 1 : just return
5110  //    1 => 1 : just return
5111  //   -1 => either 0 or 1; must signal target thread
5112  //         That is, we can safely transition _Event from -1 to either
5113  //         0 or 1.
5114  // See also: "Semaphores in Plan 9" by Mullender & Cox
5115  //
5116  // Note: Forcing a transition from "-1" to "1" on an unpark() means
5117  // that it will take two back-to-back park() calls for the owning
5118  // thread to block. This has the benefit of forcing a spurious return
5119  // from the first park() call after an unpark() call, which will help
5120  // shake out uses of park() and unpark() without condition variables.
5121
5122  if (Atomic::xchg(1, &_Event) >= 0) return;
5123
5124  ::SetEvent(_ParkHandle);
5125}
5126
5127
5128// JSR166
5129// -------------------------------------------------------
5130
5131// The Windows implementation of Park is very straightforward: Basic
5132// operations on Win32 Events turn out to have the right semantics to
5133// use them directly. We opportunistically reuse the event inherited
5134// from Monitor.
5135
5136void Parker::park(bool isAbsolute, jlong time) {
5137  guarantee(_ParkEvent != NULL, "invariant");
5138  // First, demultiplex/decode time arguments
5139  if (time < 0) { // don't wait
5140    return;
5141  } else if (time == 0 && !isAbsolute) {
5142    time = INFINITE;
5143  } else if (isAbsolute) {
5144    time -= os::javaTimeMillis(); // convert to relative time
5145    if (time <= 0) {  // already elapsed
5146      return;
5147    }
5148  } else { // relative
5149    time /= 1000000;  // Must coarsen from nanos to millis
5150    if (time == 0) {  // Wait for the minimal time unit if zero
5151      time = 1;
5152    }
5153  }
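
  // Examples of the decoding above, assuming the usual JSR166 contract
  // (absolute time in milliseconds, relative time in nanoseconds):
  //   park(false, 0)        -> INFINITE wait (no timeout)
  //   park(false, 500000)   -> 500,000 ns rounds to 0 ms, coarsened to a 1 ms wait
  //   park(false, 3000000)  -> 3 ms wait
  //   park(true,  T)        -> wait of (T - javaTimeMillis()) ms, or an immediate
  //                            return if that deadline has already passed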
5154
5155  JavaThread* thread = JavaThread::current();
5156
5157  // Don't wait if interrupted or already triggered
5158  if (Thread::is_interrupted(thread, false) ||
5159      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5160    ResetEvent(_ParkEvent);
5161    return;
5162  } else {
5163    ThreadBlockInVM tbivm(thread);
5164    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5165    thread->set_suspend_equivalent();
5166
5167    WaitForSingleObject(_ParkEvent, time);
5168    ResetEvent(_ParkEvent);
5169
5170    // If externally suspended while waiting, re-suspend
5171    if (thread->handle_special_suspend_equivalent_condition()) {
5172      thread->java_suspend_self();
5173    }
5174  }
5175}
5176
5177void Parker::unpark() {
5178  guarantee(_ParkEvent != NULL, "invariant");
5179  SetEvent(_ParkEvent);
5180}
5181
5182// Run the specified command in a separate process. Return its exit value,
5183// or -1 on failure (e.g. can't create a new process).
5184int os::fork_and_exec(char* cmd) {
5185  STARTUPINFO si;
5186  PROCESS_INFORMATION pi;
5187
5188  memset(&si, 0, sizeof(si));
5189  si.cb = sizeof(si);
5190  memset(&pi, 0, sizeof(pi));
5191  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5192                            cmd,    // command line
5193                            NULL,   // process security attribute
5194                            NULL,   // thread security attribute
5195                            TRUE,   // inherits system handles
5196                            0,      // no creation flags
5197                            NULL,   // use parent's environment block
5198                            NULL,   // use parent's starting directory
5199                            &si,    // (in) startup information
5200                            &pi);   // (out) process information
5201
5202  if (rslt) {
5203    // Wait until child process exits.
5204    WaitForSingleObject(pi.hProcess, INFINITE);
5205
5206    DWORD exit_code;
5207    GetExitCodeProcess(pi.hProcess, &exit_code);
5208
5209    // Close process and thread handles.
5210    CloseHandle(pi.hProcess);
5211    CloseHandle(pi.hThread);
5212
5213    return (int)exit_code;
5214  } else {
5215    return -1;
5216  }
5217}
5218
5219//--------------------------------------------------------------------------------------------------
5220// Non-product code
5221
5222static int mallocDebugIntervalCounter = 0;
5223static int mallocDebugCounter = 0;
5224bool os::check_heap(bool force) {
5225  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
5226  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
5227    // Note: HeapValidate executes two hardware breakpoints when it finds something
5228    // wrong; at these points, eax contains the address of the offending block (I think).
5229    // To get to the explicit error message(s) below, just continue twice.
5230    //
5231    // Note:  we want to check the CRT heap, which is not necessarily located in the
5232    // process default heap.
5233    HANDLE heap = (HANDLE) _get_heap_handle();
5234    if (!heap) {
5235      return true;
5236    }
5237
5238    // If we fail to lock the heap, then gflags.exe has been used
5239    // or some other special heap flag has been set that prevents
5240    // locking. We don't try to walk a heap we can't lock.
5241    if (HeapLock(heap) != 0) {
5242      PROCESS_HEAP_ENTRY phe;
5243      phe.lpData = NULL;
5244      while (HeapWalk(heap, &phe) != 0) {
5245        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
5246            !HeapValidate(heap, 0, phe.lpData)) {
5247          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
5248          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
5249          HeapUnlock(heap);
5250          fatal("corrupted C heap");
5251        }
5252      }
5253      DWORD err = GetLastError();
5254      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
5255        HeapUnlock(heap);
5256        fatal("heap walk aborted with error %d", err);
5257      }
5258      HeapUnlock(heap);
5259    }
5260    mallocDebugIntervalCounter = 0;
5261  }
5262  return true;
5263}
5264
5265
5266bool os::find(address addr, outputStream* st) {
5267  int offset = -1;
5268  bool result = false;
5269  char buf[256];
5270  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5271    st->print(PTR_FORMAT " ", addr);
5272    if (strlen(buf) < sizeof(buf) - 1) {
5273      char* p = strrchr(buf, '\\');
5274      if (p) {
5275        st->print("%s", p + 1);
5276      } else {
5277        st->print("%s", buf);
5278      }
5279    } else {
5280      // The library name is probably truncated. Let's omit the library name.
5281      // See also JDK-8147512.
5282    }
5283    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5284      st->print("::%s + 0x%x", buf, offset);
5285    }
5286    st->cr();
5287    result = true;
5288  }
5289  return result;
5290}
5291
5292LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5293  DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5294
5295  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5296    JavaThread* thread = JavaThread::current();
5297    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5298    address addr = (address) exceptionRecord->ExceptionInformation[1];
5299
5300    if (os::is_memory_serialize_page(thread, addr)) {
5301      return EXCEPTION_CONTINUE_EXECUTION;
5302    }
5303  }
5304
5305  return EXCEPTION_CONTINUE_SEARCH;
5306}
5307
5308// We don't build a headless jre for Windows
5309bool os::is_headless_jre() { return false; }
5310
5311static jint initSock() {
5312  WSADATA wsadata;
5313
5314  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5315    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5316                ::GetLastError());
5317    return JNI_ERR;
5318  }
5319  return JNI_OK;
5320}
5321
5322struct hostent* os::get_host_by_name(char* name) {
5323  return (struct hostent*)gethostbyname(name);
5324}
5325
5326int os::socket_close(int fd) {
5327  return ::closesocket(fd);
5328}
5329
5330int os::socket(int domain, int type, int protocol) {
5331  return ::socket(domain, type, protocol);
5332}
5333
5334int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5335  return ::connect(fd, him, len);
5336}
5337
5338int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5339  return ::recv(fd, buf, (int)nBytes, flags);
5340}
5341
5342int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5343  return ::send(fd, buf, (int)nBytes, flags);
5344}
5345
5346int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5347  return ::send(fd, buf, (int)nBytes, flags);
5348}
5349
5350// WINDOWS CONTEXT Flags for THREAD_SAMPLING
5351#if defined(IA32)
5352  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5353#elif defined (AMD64)
5354  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5355#endif
5356
5357// returns true if thread could be suspended,
5358// false otherwise
5359static bool do_suspend(HANDLE* h) {
5360  if (h != NULL) {
5361    if (SuspendThread(*h) != ~0) {
5362      return true;
5363    }
5364  }
5365  return false;
5366}
5367
5368// resume the thread
5369// calling resume on an active thread is a no-op
5370static void do_resume(HANDLE* h) {
5371  if (h != NULL) {
5372    ResumeThread(*h);
5373  }
5374}
5375
5376// retrieve a suspend/resume context capable handle
5377// from the tid. Caller validates handle return value.
5378void get_thread_handle_for_extended_context(HANDLE* h,
5379                                            OSThread::thread_id_t tid) {
5380  if (h != NULL) {
5381    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5382  }
5383}
5384
5385// Thread sampling implementation
5386//
5387void os::SuspendedThreadTask::internal_do_task() {
5388  CONTEXT    ctxt;
5389  HANDLE     h = NULL;
5390
5391  // get context capable handle for thread
5392  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5393
5394  // sanity
5395  if (h == NULL || h == INVALID_HANDLE_VALUE) {
5396    return;
5397  }
5398
5399  // suspend the thread
5400  if (do_suspend(&h)) {
5401    ctxt.ContextFlags = sampling_context_flags;
5402    // get thread context
5403    GetThreadContext(h, &ctxt);
5404    SuspendedThreadTaskContext context(_thread, &ctxt);
5405    // pass context to Thread Sampling impl
5406    do_task(context);
5407    // resume thread
5408    do_resume(&h);
5409  }
5410
5411  // close handle
5412  CloseHandle(h);
5413}
5414
5415bool os::start_debugging(char *buf, int buflen) {
5416  int len = (int)strlen(buf);
5417  char *p = &buf[len];
5418
5419  jio_snprintf(p, buflen-len,
5420             "\n\n"
5421             "Do you want to debug the problem?\n\n"
5422             "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5423             "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5424             "Otherwise, select 'No' to abort...",
5425             os::current_process_id(), os::current_thread_id());
5426
5427  bool yes = os::message_box("Unexpected Error", buf);
5428
5429  if (yes) {
5430    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5431    // exception. If VM is running inside a debugger, the debugger will
5432    // catch the exception. Otherwise, the breakpoint exception will reach
5433    // the default windows exception handler, which can spawn a debugger and
5434    // automatically attach to the dying VM.
5435    os::breakpoint();
5436    yes = false;
5437  }
5438  return yes;
5439}
5440
5441void* os::get_default_process_handle() {
5442  return (void*)GetModuleHandle(NULL);
5443}
5444
5445// Builds a platform dependent Agent_OnLoad_<lib_name> function name
5446// which is used to find statically linked in agents.
5447// Additionally for Windows, takes into account __stdcall names.
5448// Parameters:
5449//            sym_name: Symbol in library we are looking for
5450//            lib_name: Name of library to look in, NULL for shared libs.
5451//            is_absolute_path == true if lib_name is absolute path to agent
5452//                                     such as "C:/a/b/L.dll"
5453//            == false if only the base name of the library is passed in
5454//               such as "L"
5455char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5456                                    bool is_absolute_path) {
5457  char *agent_entry_name;
5458  size_t len;
5459  size_t name_len;
5460  size_t prefix_len = strlen(JNI_LIB_PREFIX);
5461  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5462  const char *start;
5463
5464  if (lib_name != NULL) {
5465    len = name_len = strlen(lib_name);
5466    if (is_absolute_path) {
5467      // Need to strip path, prefix and suffix
5468      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5469        lib_name = ++start;
5470      } else {
5471        // Need to check for drive prefix
5472        if ((start = strchr(lib_name, ':')) != NULL) {
5473          lib_name = ++start;
5474        }
5475      }
5476      if (len <= (prefix_len + suffix_len)) {
5477        return NULL;
5478      }
5479      lib_name += prefix_len;
5480      name_len = strlen(lib_name) - suffix_len;
5481    }
5482  }
5483  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5484  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5485  if (agent_entry_name == NULL) {
5486    return NULL;
5487  }
5488  if (lib_name != NULL) {
5489    const char *p = strrchr(sym_name, '@');
5490    if (p != NULL && p != sym_name) {
5491      // sym_name == _Agent_OnLoad@XX
5492      strncpy(agent_entry_name, sym_name, (p - sym_name));
5493      agent_entry_name[(p-sym_name)] = '\0';
5494      // agent_entry_name == _Agent_OnLoad
5495      strcat(agent_entry_name, "_");
5496      strncat(agent_entry_name, lib_name, name_len);
5497      strcat(agent_entry_name, p);
5498      // agent_entry_name == _Agent_OnLoad_lib_name@XX
5499    } else {
5500      strcpy(agent_entry_name, sym_name);
5501      strcat(agent_entry_name, "_");
5502      strncat(agent_entry_name, lib_name, name_len);
5503    }
5504  } else {
5505    strcpy(agent_entry_name, sym_name);
5506  }
5507  return agent_entry_name;
5508}
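// Illustrative results of the construction above, using hypothetical agent
// library names:
//
//   sym_name "Agent_OnLoad",     lib_name NULL  ->  "Agent_OnLoad"
//   sym_name "Agent_OnLoad",     lib_name "L"   ->  "Agent_OnLoad_L"
//   sym_name "_Agent_OnLoad@16", lib_name "L"   ->  "_Agent_OnLoad_L@16"
//                                                   (__stdcall decoration kept after the name)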
5509
5510#ifndef PRODUCT
5511
5512// test the code path in reserve_memory_special() that tries to allocate memory in a single
5513// contiguous memory block at a particular address.
5514// The test first tries to find a good approximate address to allocate at by using the same
5515// method to allocate some memory at any address. The test then tries to allocate memory in
5516// the vicinity (not directly after it, to avoid possible by-chance use of that location).
5517// This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5518// the previously allocated memory is available for allocation. The only actual failure
5519// that is reported is when the test tries to allocate at a particular location but gets a
5520// different valid one. A NULL return value at this point is not considered an error but may
5521// be legitimate.
5522// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5523void TestReserveMemorySpecial_test() {
5524  if (!UseLargePages) {
5525    if (VerboseInternalVMTests) {
5526      tty->print("Skipping test because large pages are disabled");
5527    }
5528    return;
5529  }
5530  // save current value of globals
5531  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5532  bool old_use_numa_interleaving = UseNUMAInterleaving;
5533
5534  // set globals to make sure we hit the correct code path
5535  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5536
5537  // do an allocation at an address selected by the OS to get a good one.
5538  const size_t large_allocation_size = os::large_page_size() * 4;
5539  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5540  if (result == NULL) {
5541    if (VerboseInternalVMTests) {
5542      tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5543                          large_allocation_size);
5544    }
5545  } else {
5546    os::release_memory_special(result, large_allocation_size);
5547
5548    // allocate another page within the recently allocated memory area which seems to be a good location. At least
5549    // we managed to get it once.
5550    const size_t expected_allocation_size = os::large_page_size();
5551    char* expected_location = result + os::large_page_size();
5552    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5553    if (actual_location == NULL) {
5554      if (VerboseInternalVMTests) {
5555        tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5556                            expected_location, expected_allocation_size);
5557      }
5558    } else {
5559      // release memory
5560      os::release_memory_special(actual_location, expected_allocation_size);
5561      // only now check, after releasing any memory to avoid any leaks.
5562      assert(actual_location == expected_location,
5563             "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5564             expected_location, expected_allocation_size, actual_location);
5565    }
5566  }
5567
5568  // restore globals
5569  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5570  UseNUMAInterleaving = old_use_numa_interleaving;
5571}
5572#endif // PRODUCT
5573
5574/*
5575  All the defined signal names for Windows.
5576
5577  NOTE that not all of these names are accepted by FindSignal!
5578
5579  For various reasons some of these may be rejected at runtime.
5580
5581  Here are the names currently accepted by a user of sun.misc.Signal with
5582  1.4.1 (ignoring potential interaction with use of chaining, etc):
5583
5584     (LIST TBD)
5585
5586*/
5587int os::get_signal_number(const char* name) {
5588  static const struct {
5589    const char* name;
5590    int         number;
5591  } siglabels[] = {
5592    // derived from version 6.0 VC98/include/signal.h
5593    {"ABRT",  SIGABRT},    // abnormal termination triggered by abort call
5594    {"FPE",   SIGFPE},     // floating point exception
5595    {"SEGV",  SIGSEGV},    // segment violation
5596    {"INT",   SIGINT},     // interrupt
5597    {"TERM",  SIGTERM},    // software term signal from kill
5598    {"BREAK", SIGBREAK},   // Ctrl-Break sequence
5599    {"ILL",   SIGILL}};    // illegal instruction
5600  for (size_t i = 0; i < sizeof(siglabels) / sizeof(siglabels[0]); i++) {
5601    if (strcmp(name, siglabels[i].name) == 0) return siglabels[i].number;
5602  }
5603  return -1;
5604}
5605
5606// Fast current thread access
5607
5608int os::win32::_thread_ptr_offset = 0;
5609
5610static void call_wrapper_dummy() {}
5611
5612// We need to call the os_exception_wrapper once so that it sets
5613// up the offset from FS of the thread pointer.
5614void os::win32::initialize_thread_ptr_offset() {
5615  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5616                           NULL, NULL, NULL, NULL);
5617}
5618