os_windows.cpp revision 10164:a5d77b663c2b
1/*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
26#define _WIN32_WINNT 0x0600
27
28// no precompiled headers
29#include "classfile/classLoader.hpp"
30#include "classfile/systemDictionary.hpp"
31#include "classfile/vmSymbols.hpp"
32#include "code/icBuffer.hpp"
33#include "code/vtableStubs.hpp"
34#include "compiler/compileBroker.hpp"
35#include "compiler/disassembler.hpp"
36#include "interpreter/interpreter.hpp"
37#include "jvm_windows.h"
38#include "memory/allocation.inline.hpp"
39#include "memory/filemap.hpp"
40#include "mutex_windows.inline.hpp"
41#include "oops/oop.inline.hpp"
42#include "os_share_windows.hpp"
43#include "os_windows.inline.hpp"
44#include "prims/jniFastGetField.hpp"
45#include "prims/jvm.h"
46#include "prims/jvm_misc.hpp"
47#include "runtime/arguments.hpp"
48#include "runtime/atomic.inline.hpp"
49#include "runtime/extendedPC.hpp"
50#include "runtime/globals.hpp"
51#include "runtime/interfaceSupport.hpp"
52#include "runtime/java.hpp"
53#include "runtime/javaCalls.hpp"
54#include "runtime/mutexLocker.hpp"
55#include "runtime/objectMonitor.hpp"
56#include "runtime/orderAccess.inline.hpp"
57#include "runtime/osThread.hpp"
58#include "runtime/perfMemory.hpp"
59#include "runtime/sharedRuntime.hpp"
60#include "runtime/statSampler.hpp"
61#include "runtime/stubRoutines.hpp"
62#include "runtime/thread.inline.hpp"
63#include "runtime/threadCritical.hpp"
64#include "runtime/timer.hpp"
65#include "runtime/vm_version.hpp"
66#include "semaphore_windows.hpp"
67#include "services/attachListener.hpp"
68#include "services/memTracker.hpp"
69#include "services/runtimeService.hpp"
70#include "utilities/decoder.hpp"
71#include "utilities/defaultStream.hpp"
72#include "utilities/events.hpp"
73#include "utilities/growableArray.hpp"
74#include "utilities/vmError.hpp"
75
76#ifdef _DEBUG
77#include <crtdbg.h>
78#endif
79
80
81#include <windows.h>
82#include <sys/types.h>
83#include <sys/stat.h>
84#include <sys/timeb.h>
85#include <objidl.h>
86#include <shlobj.h>
87
88#include <malloc.h>
89#include <signal.h>
90#include <direct.h>
91#include <errno.h>
92#include <fcntl.h>
93#include <io.h>
94#include <process.h>              // For _beginthreadex(), _endthreadex()
95#include <imagehlp.h>             // For os::dll_address_to_function_name
96// for enumerating dll libraries
97#include <vdmdbg.h>
98
99// for timer info max values which include all bits
100#define ALL_64_BITS CONST64(-1)
101
102// For DLL loading/load error detection
103// Values of PE COFF
104#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
105#define IMAGE_FILE_SIGNATURE_LENGTH 4
106
107static HANDLE main_process;
108static HANDLE main_thread;
109static int    main_thread_id;
110
111static FILETIME process_creation_time;
112static FILETIME process_exit_time;
113static FILETIME process_user_time;
114static FILETIME process_kernel_time;
115
116#ifdef _M_IA64
117  #define __CPU__ ia64
118#else
119  #ifdef _M_AMD64
120    #define __CPU__ amd64
121  #else
122    #define __CPU__ i486
123  #endif
124#endif
125
126// save DLL module handle, used by GetModuleFileName
127
128HINSTANCE vm_lib_handle;
129
130BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
131  switch (reason) {
132  case DLL_PROCESS_ATTACH:
133    vm_lib_handle = hinst;
134    if (ForceTimeHighResolution) {
135      timeBeginPeriod(1L);
136    }
137    break;
138  case DLL_PROCESS_DETACH:
139    if (ForceTimeHighResolution) {
140      timeEndPeriod(1L);
141    }
142    break;
143  default:
144    break;
145  }
146  return true;
147}
148
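// fileTimeAsDouble() below converts a FILETIME (a 64-bit count of 100 ns ticks split
// across two 32-bit words) into seconds; 'high' approximates 2^32 when scaling the
// high word.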
149static inline double fileTimeAsDouble(FILETIME* time) {
150  const double high  = (double) ((unsigned int) ~0);
151  const double split = 10000000.0;
152  double result = (time->dwLowDateTime / split) +
153                   time->dwHighDateTime * (high/split);
154  return result;
155}
156
157// Implementation of os
158
159bool os::unsetenv(const char* name) {
160  assert(name != NULL, "Null pointer");
161  return (SetEnvironmentVariable(name, NULL) == TRUE);
162}
163
164// No setuid programs under Windows.
165bool os::have_special_privileges() {
166  return false;
167}
168
169
170// This method is a periodic task to check for misbehaving JNI applications
171// under CheckJNI; we can add any periodic checks here.
172// For Windows it currently does nothing.
173void os::run_periodic_checks() {
174  return;
175}
176
177// previous UnhandledExceptionFilter, if there is one
178static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
179
180LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
181
182void os::init_system_properties_values() {
183  // sysclasspath, java_home, dll_dir
184  {
185    char *home_path;
186    char *dll_path;
187    char *pslash;
188    char *bin = "\\bin";
189    char home_dir[MAX_PATH + 1];
190    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
191
192    if (alt_home_dir != NULL)  {
193      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
194      home_dir[MAX_PATH] = '\0';
195    } else {
196      os::jvm_path(home_dir, sizeof(home_dir));
197      // Found the full path to jvm.dll.
198      // Now cut the path to <java_home>/jre if we can.
199      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
200      pslash = strrchr(home_dir, '\\');
201      if (pslash != NULL) {
202        *pslash = '\0';                   // get rid of \{client|server}
203        pslash = strrchr(home_dir, '\\');
204        if (pslash != NULL) {
205          *pslash = '\0';                 // get rid of \bin
206        }
207      }
208    }
209
210    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
211    if (home_path == NULL) {
212      return;
213    }
214    strcpy(home_path, home_dir);
215    Arguments::set_java_home(home_path);
216    FREE_C_HEAP_ARRAY(char, home_path);
217
218    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
219                                mtInternal);
220    if (dll_path == NULL) {
221      return;
222    }
223    strcpy(dll_path, home_dir);
224    strcat(dll_path, bin);
225    Arguments::set_dll_dir(dll_path);
226    FREE_C_HEAP_ARRAY(char, dll_path);
227
228    if (!set_boot_path('\\', ';')) {
229      return;
230    }
231  }
232
233// library_path
234#define EXT_DIR "\\lib\\ext"
235#define BIN_DIR "\\bin"
236#define PACKAGE_DIR "\\Sun\\Java"
237  {
238    // Win32 library search order (See the documentation for LoadLibrary):
239    //
240    // 1. The directory from which the application is loaded.
241    // 2. The system wide Java Extensions directory (Java only)
242    // 3. System directory (GetSystemDirectory)
243    // 4. Windows directory (GetWindowsDirectory)
244    // 5. The PATH environment variable
245    // 6. The current directory
246
247    char *library_path;
248    char tmp[MAX_PATH];
249    char *path_str = ::getenv("PATH");
250
251    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
252                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
253
254    library_path[0] = '\0';
255
256    GetModuleFileName(NULL, tmp, sizeof(tmp));
257    *(strrchr(tmp, '\\')) = '\0';
258    strcat(library_path, tmp);
259
260    GetWindowsDirectory(tmp, sizeof(tmp));
261    strcat(library_path, ";");
262    strcat(library_path, tmp);
263    strcat(library_path, PACKAGE_DIR BIN_DIR);
264
265    GetSystemDirectory(tmp, sizeof(tmp));
266    strcat(library_path, ";");
267    strcat(library_path, tmp);
268
269    GetWindowsDirectory(tmp, sizeof(tmp));
270    strcat(library_path, ";");
271    strcat(library_path, tmp);
272
273    if (path_str) {
274      strcat(library_path, ";");
275      strcat(library_path, path_str);
276    }
277
278    strcat(library_path, ";.");
279
280    Arguments::set_library_path(library_path);
281    FREE_C_HEAP_ARRAY(char, library_path);
282  }
283
284  // Default extensions directory
285  {
286    char path[MAX_PATH];
287    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
288    GetWindowsDirectory(path, MAX_PATH);
289    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
290            path, PACKAGE_DIR, EXT_DIR);
291    Arguments::set_ext_dirs(buf);
292  }
293  #undef EXT_DIR
294  #undef BIN_DIR
295  #undef PACKAGE_DIR
296
297#ifndef _WIN64
298  // set our UnhandledExceptionFilter and save any previous one
299  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
300#endif
301
302  // Done
303  return;
304}
305
306void os::breakpoint() {
307  DebugBreak();
308}
309
310// Invoked from the BREAKPOINT Macro
311extern "C" void breakpoint() {
312  os::breakpoint();
313}
314
315// The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
316// So far, this method is only used by Native Memory Tracking, which is
317// only supported on Windows XP or later.
318//
319int os::get_native_stack(address* stack, int frames, int toSkip) {
320#ifdef _NMT_NOINLINE_
321  toSkip++;
322#endif
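  // The '+ 1' passed below accounts for this os::get_native_stack() frame itself.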
323  int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
324  for (int index = captured; index < frames; index ++) {
325    stack[index] = NULL;
326  }
327  return captured;
328}
329
330
331// os::current_stack_base()
332//
333//   Returns the base of the stack, which is the stack's
334//   starting address.  This function must be called
335//   while running on the stack of the thread being queried.
336
337address os::current_stack_base() {
338  MEMORY_BASIC_INFORMATION minfo;
339  address stack_bottom;
340  size_t stack_size;
341
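  // VirtualQuery on the address of the local 'minfo' returns the region containing
  // this very stack frame; its AllocationBase is the low end of the stack reservation.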
342  VirtualQuery(&minfo, &minfo, sizeof(minfo));
343  stack_bottom =  (address)minfo.AllocationBase;
344  stack_size = minfo.RegionSize;
345
346  // Add up the sizes of all the regions with the same
347  // AllocationBase.
348  while (1) {
349    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
350    if (stack_bottom == (address)minfo.AllocationBase) {
351      stack_size += minfo.RegionSize;
352    } else {
353      break;
354    }
355  }
356
357#ifdef _M_IA64
358  // IA64 has memory and register stacks
359  //
360  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
361  // at thread creation (1MB backing store growing upwards, 1MB memory stack
362  // growing downwards, 2MB summed up)
363  //
364  // ...
365  // ------- top of stack (high address) -----
366  // |
367  // |      1MB
368  // |      Backing Store (Register Stack)
369  // |
370  // |         / \
371  // |          |
372  // |          |
373  // |          |
374  // ------------------------ stack base -----
375  // |      1MB
376  // |      Memory Stack
377  // |
378  // |          |
379  // |          |
380  // |          |
381  // |         \ /
382  // |
383  // ----- bottom of stack (low address) -----
384  // ...
385
386  stack_size = stack_size / 2;
387#endif
388  return stack_bottom + stack_size;
389}
390
391size_t os::current_stack_size() {
392  size_t sz;
393  MEMORY_BASIC_INFORMATION minfo;
394  VirtualQuery(&minfo, &minfo, sizeof(minfo));
395  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
396  return sz;
397}
398
399struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
400  const struct tm* time_struct_ptr = localtime(clock);
401  if (time_struct_ptr != NULL) {
402    *res = *time_struct_ptr;
403    return res;
404  }
405  return NULL;
406}
407
408LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
409
410// Thread start routine for all new Java threads
411static unsigned __stdcall java_start(Thread* thread) {
412  // Try to randomize the cache line index of hot stack frames.
413  // This helps when threads with the same stack traces evict each other's
414  // cache lines. The threads can be either from the same JVM instance, or
415  // from different JVM instances. The benefit is especially noticeable on
416  // processors with hyperthreading technology.
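  // The _alloca below shifts the stack pointer by ((pid ^ counter) & 7) * 128 bytes,
  // i.e. by 0..896 bytes in 128-byte steps (two 64-byte cache lines per step).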
417  static int counter = 0;
418  int pid = os::current_process_id();
419  _alloca(((pid ^ counter++) & 7) * 128);
420
421  thread->initialize_thread_current();
422
423  OSThread* osthr = thread->osthread();
424  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
425
426  if (UseNUMA) {
427    int lgrp_id = os::numa_get_group_id();
428    if (lgrp_id != -1) {
429      thread->set_lgrp_id(lgrp_id);
430    }
431  }
432
433  // Diagnostic code to investigate JDK-6573254
434  int res = 30115;  // non-java thread
435  if (thread->is_Java_thread()) {
436    res = 20115;    // java thread
437  }
438
439  // Install a win32 structured exception handler around every thread created
440  // by the VM, so the VM can generate an error dump when an exception occurs in a
441  // non-Java thread (e.g. the VM thread).
442  __try {
443    thread->run();
444  } __except(topLevelExceptionFilter(
445                                     (_EXCEPTION_POINTERS*)_exception_info())) {
446    // Nothing to do.
447  }
448
449  // One less thread is executing
450  // When the VMThread gets here, the main thread may have already exited
451  // which frees the CodeHeap containing the Atomic::add code
452  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
453    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
454  }
455
456  // Thread must not return from exit_process_or_thread(), but if it does,
457  // let it proceed to exit normally
458  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
459}
460
461static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
462                                  int thread_id) {
463  // Allocate the OSThread object
464  OSThread* osthread = new OSThread(NULL, NULL);
465  if (osthread == NULL) return NULL;
466
467  // Initialize support for Java interrupts
468  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
469  if (interrupt_event == NULL) {
470    delete osthread;
471    return NULL;
472  }
473  osthread->set_interrupt_event(interrupt_event);
474
475  // Store info on the Win32 thread into the OSThread
476  osthread->set_thread_handle(thread_handle);
477  osthread->set_thread_id(thread_id);
478
479  if (UseNUMA) {
480    int lgrp_id = os::numa_get_group_id();
481    if (lgrp_id != -1) {
482      thread->set_lgrp_id(lgrp_id);
483    }
484  }
485
486  // Initial thread state is INITIALIZED, not SUSPENDED
487  osthread->set_state(INITIALIZED);
488
489  return osthread;
490}
491
492
493bool os::create_attached_thread(JavaThread* thread) {
494#ifdef ASSERT
495  thread->verify_not_published();
496#endif
497  HANDLE thread_h;
498  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
499                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
500    fatal("DuplicateHandle failed\n");
501  }
502  OSThread* osthread = create_os_thread(thread, thread_h,
503                                        (int)current_thread_id());
504  if (osthread == NULL) {
505    return false;
506  }
507
508  // Initial thread state is RUNNABLE
509  osthread->set_state(RUNNABLE);
510
511  thread->set_osthread(osthread);
512  return true;
513}
514
515bool os::create_main_thread(JavaThread* thread) {
516#ifdef ASSERT
517  thread->verify_not_published();
518#endif
519  if (_starting_thread == NULL) {
520    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
521    if (_starting_thread == NULL) {
522      return false;
523    }
524  }
525
526  // The primordial thread is runnable from the start.
527  _starting_thread->set_state(RUNNABLE);
528
529  thread->set_osthread(_starting_thread);
530  return true;
531}
532
533// Allocate and initialize a new OSThread
534bool os::create_thread(Thread* thread, ThreadType thr_type,
535                       size_t stack_size) {
536  unsigned thread_id;
537
538  // Allocate the OSThread object
539  OSThread* osthread = new OSThread(NULL, NULL);
540  if (osthread == NULL) {
541    return false;
542  }
543
544  // Initialize support for Java interrupts
545  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
546  if (interrupt_event == NULL) {
547    delete osthread;
548    return false;
549  }
550  osthread->set_interrupt_event(interrupt_event);
551  osthread->set_interrupted(false);
552
553  thread->set_osthread(osthread);
554
555  if (stack_size == 0) {
556    switch (thr_type) {
557    case os::java_thread:
558      // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
559      if (JavaThread::stack_size_at_create() > 0) {
560        stack_size = JavaThread::stack_size_at_create();
561      }
562      break;
563    case os::compiler_thread:
564      if (CompilerThreadStackSize > 0) {
565        stack_size = (size_t)(CompilerThreadStackSize * K);
566        break;
567      } // else fall through:
568        // use VMThreadStackSize if CompilerThreadStackSize is not defined
569    case os::vm_thread:
570    case os::pgc_thread:
571    case os::cgc_thread:
572    case os::watcher_thread:
573      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
574      break;
575    }
576  }
577
578  // Create the Win32 thread
579  //
580  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
581  // does not specify the stack size. Instead, it specifies the size of the
582  // initially committed space. The stack size is determined by the
583  // PE header in the executable. If the committed "stack_size" is larger
584  // than the default value in the PE header, the stack is rounded up to the
585  // nearest multiple of 1MB. For example, if the launcher has a default
586  // stack size of 320k, specifying any size less than 320k does not
587  // affect the actual stack size at all; it only affects the initial
588  // commitment. On the other hand, specifying a 'stack_size' larger than the
589  // default value may cause a significant increase in memory usage, because
590  // not only is the stack space rounded up to a multiple of 1MB, but the
591  // entire space is also committed upfront.
592  //
593  // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
594  // for CreateThread() that makes it treat 'stack_size' as the stack size. However,
595  // we are not supposed to call CreateThread() directly according to the MSDN
596  // documentation, because the JVM uses the C runtime library. The good news is
597  // that the flag appears to work with _beginthreadex() as well.
598
599  HANDLE thread_handle =
600    (HANDLE)_beginthreadex(NULL,
601                           (unsigned)stack_size,
602                           (unsigned (__stdcall *)(void*)) java_start,
603                           thread,
604                           CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
605                           &thread_id);
606
607  if (thread_handle == NULL) {
608    // Need to clean up stuff we've allocated so far
609    CloseHandle(osthread->interrupt_event());
610    thread->set_osthread(NULL);
611    delete osthread;
612    return false;
613  }
614
615  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
616
617  // Store info on the Win32 thread into the OSThread
618  osthread->set_thread_handle(thread_handle);
619  osthread->set_thread_id(thread_id);
620
621  // Initial thread state is INITIALIZED, not SUSPENDED
622  osthread->set_state(INITIALIZED);
623
624  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
625  return true;
626}
627
628
629// Free Win32 resources related to the OSThread
630void os::free_thread(OSThread* osthread) {
631  assert(osthread != NULL, "osthread not set");
632  CloseHandle(osthread->thread_handle());
633  CloseHandle(osthread->interrupt_event());
634  delete osthread;
635}
636
637static jlong first_filetime;
638static jlong initial_performance_count;
639static jlong performance_frequency;
640
641
642jlong as_long(LARGE_INTEGER x) {
643  jlong result = 0; // initialization to avoid warning
644  set_high(&result, x.HighPart);
645  set_low(&result, x.LowPart);
646  return result;
647}
648
649
650jlong os::elapsed_counter() {
651  LARGE_INTEGER count;
652  QueryPerformanceCounter(&count);
653  return as_long(count) - initial_performance_count;
654}
655
656
657jlong os::elapsed_frequency() {
658  return performance_frequency;
659}
660
661
662julong os::available_memory() {
663  return win32::available_memory();
664}
665
666julong os::win32::available_memory() {
667  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
668  // value if total memory is larger than 4GB
669  MEMORYSTATUSEX ms;
670  ms.dwLength = sizeof(ms);
671  GlobalMemoryStatusEx(&ms);
672
673  return (julong)ms.ullAvailPhys;
674}
675
676julong os::physical_memory() {
677  return win32::physical_memory();
678}
679
680bool os::has_allocatable_memory_limit(julong* limit) {
681  MEMORYSTATUSEX ms;
682  ms.dwLength = sizeof(ms);
683  GlobalMemoryStatusEx(&ms);
684#ifdef _LP64
685  *limit = (julong)ms.ullAvailVirtual;
686  return true;
687#else
688  // Limit to 1400m because of the 2gb address space wall
689  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
690  return true;
691#endif
692}
693
694int os::active_processor_count() {
695  DWORD_PTR lpProcessAffinityMask = 0;
696  DWORD_PTR lpSystemAffinityMask = 0;
697  int proc_count = processor_count();
698  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
699      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
700    // The number of active processors is the number of set bits in the process affinity mask
701    int bitcount = 0;
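    // Kernighan's trick: x & (x - 1) clears the lowest set bit, so the loop below
    // runs once per set bit.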
702    while (lpProcessAffinityMask != 0) {
703      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
704      bitcount++;
705    }
706    return bitcount;
707  } else {
708    return proc_count;
709  }
710}
711
712void os::set_native_thread_name(const char *name) {
713
714  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
715  //
716  // Note that unfortunately this only works if the process
717  // is already attached to a debugger; debugger must observe
718  // the exception below to show the correct name.
719
720  const DWORD MS_VC_EXCEPTION = 0x406D1388;
721  struct {
722    DWORD dwType;     // must be 0x1000
723    LPCSTR szName;    // pointer to name (in user addr space)
724    DWORD dwThreadID; // thread ID (-1=caller thread)
725    DWORD dwFlags;    // reserved for future use, must be zero
726  } info;
727
728  info.dwType = 0x1000;
729  info.szName = name;
730  info.dwThreadID = -1;
731  info.dwFlags = 0;
732
733  __try {
734    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
735  } __except(EXCEPTION_CONTINUE_EXECUTION) {}
736}
737
738bool os::distribute_processes(uint length, uint* distribution) {
739  // Not yet implemented.
740  return false;
741}
742
743bool os::bind_to_processor(uint processor_id) {
744  // Not yet implemented.
745  return false;
746}
747
748void os::win32::initialize_performance_counter() {
749  LARGE_INTEGER count;
750  QueryPerformanceFrequency(&count);
751  performance_frequency = as_long(count);
752  QueryPerformanceCounter(&count);
753  initial_performance_count = as_long(count);
754}
755
756
757double os::elapsedTime() {
758  return (double) elapsed_counter() / (double) elapsed_frequency();
759}
760
761
762// Windows format:
763//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
764// Java format:
765//   Java standards require the number of milliseconds since 1/1/1970
766
767// Constant offset - calculated using offset()
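// 116444736000000000 is the number of 100 ns ticks from 1601-01-01 to 1970-01-01:
// 369 years = 134,774 days = 11,644,473,600 seconds = 11,644,473,600 * 10^7 ticks.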
768static jlong  _offset   = 116444736000000000;
769// Fake time counter for reproducible results when debugging
770static jlong  fake_time = 0;
771
772#ifdef ASSERT
773// Just to be safe, recalculate the offset in debug mode
774static jlong _calculated_offset = 0;
775static int   _has_calculated_offset = 0;
776
777jlong offset() {
778  if (_has_calculated_offset) return _calculated_offset;
779  SYSTEMTIME java_origin;
780  java_origin.wYear          = 1970;
781  java_origin.wMonth         = 1;
782  java_origin.wDayOfWeek     = 0; // ignored
783  java_origin.wDay           = 1;
784  java_origin.wHour          = 0;
785  java_origin.wMinute        = 0;
786  java_origin.wSecond        = 0;
787  java_origin.wMilliseconds  = 0;
788  FILETIME jot;
789  if (!SystemTimeToFileTime(&java_origin, &jot)) {
790    fatal("Error = %d\nWindows error", GetLastError());
791  }
792  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
793  _has_calculated_offset = 1;
794  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
795  return _calculated_offset;
796}
797#else
798jlong offset() {
799  return _offset;
800}
801#endif
802
803jlong windows_to_java_time(FILETIME wt) {
804  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
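  // Dividing by 10,000 converts 100 ns ticks to milliseconds.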
805  return (a - offset()) / 10000;
806}
807
808// Returns time ticks in 100 ns units (tenths of a microsecond)
809jlong windows_to_time_ticks(FILETIME wt) {
810  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
811  return (a - offset());
812}
813
814FILETIME java_to_windows_time(jlong l) {
815  jlong a = (l * 10000) + offset();
816  FILETIME result;
817  result.dwHighDateTime = high(a);
818  result.dwLowDateTime  = low(a);
819  return result;
820}
821
822bool os::supports_vtime() { return true; }
823bool os::enable_vtime() { return false; }
824bool os::vtime_enabled() { return false; }
825
826double os::elapsedVTime() {
827  FILETIME created;
828  FILETIME exited;
829  FILETIME kernel;
830  FILETIME user;
831  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
832    // the resolution of windows_to_java_time() should be sufficient (ms)
833    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
834  } else {
835    return elapsedTime();
836  }
837}
838
839jlong os::javaTimeMillis() {
840  if (UseFakeTimers) {
841    return fake_time++;
842  } else {
843    FILETIME wt;
844    GetSystemTimeAsFileTime(&wt);
845    return windows_to_java_time(wt);
846  }
847}
848
849void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
850  FILETIME wt;
851  GetSystemTimeAsFileTime(&wt);
852  jlong ticks = windows_to_time_ticks(wt); // 10th of micros
853  jlong secs = jlong(ticks / 10000000); // 10000 * 1000
854  seconds = secs;
855  nanos = jlong(ticks - (secs*10000000)) * 100;
856}
857
858jlong os::javaTimeNanos() {
859  LARGE_INTEGER current_count;
860  QueryPerformanceCounter(&current_count);
861  double current = as_long(current_count);
862  double freq = performance_frequency;
863  jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
864  return time;
865}
866
867void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
868  jlong freq = performance_frequency;
869  if (freq < NANOSECS_PER_SEC) {
870    // the performance counter is 64 bits and we will
871    // be multiplying it -- so no wrap in 64 bits
872    info_ptr->max_value = ALL_64_BITS;
873  } else if (freq > NANOSECS_PER_SEC) {
874    // use the max value the counter can reach to
875    // determine the max value which could be returned
876    julong max_counter = (julong)ALL_64_BITS;
877    info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
878  } else {
879    // the performance counter is 64 bits and we will
880    // be using it directly -- so no wrap in 64 bits
881    info_ptr->max_value = ALL_64_BITS;
882  }
883
884  // using a counter, so no skipping
885  info_ptr->may_skip_backward = false;
886  info_ptr->may_skip_forward = false;
887
888  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
889}
890
891char* os::local_time_string(char *buf, size_t buflen) {
892  SYSTEMTIME st;
893  GetLocalTime(&st);
894  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
895               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
896  return buf;
897}
898
899bool os::getTimesSecs(double* process_real_time,
900                      double* process_user_time,
901                      double* process_system_time) {
902  HANDLE h_process = GetCurrentProcess();
903  FILETIME create_time, exit_time, kernel_time, user_time;
904  BOOL result = GetProcessTimes(h_process,
905                                &create_time,
906                                &exit_time,
907                                &kernel_time,
908                                &user_time);
909  if (result != 0) {
910    FILETIME wt;
911    GetSystemTimeAsFileTime(&wt);
912    jlong rtc_millis = windows_to_java_time(wt);
913    jlong user_millis = windows_to_java_time(user_time);
914    jlong system_millis = windows_to_java_time(kernel_time);
915    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
916    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
917    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
918    return true;
919  } else {
920    return false;
921  }
922}
923
924void os::shutdown() {
925  // allow PerfMemory to attempt cleanup of any persistent resources
926  perfMemory_exit();
927
928  // flush buffered output, finish log files
929  ostream_abort();
930
931  // Check for abort hook
932  abort_hook_t abort_hook = Arguments::abort_hook();
933  if (abort_hook != NULL) {
934    abort_hook();
935  }
936}
937
938
939static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
940                                         PMINIDUMP_EXCEPTION_INFORMATION,
941                                         PMINIDUMP_USER_STREAM_INFORMATION,
942                                         PMINIDUMP_CALLBACK_INFORMATION);
943
944static HANDLE dumpFile = NULL;
945
946// Check if dump file can be created.
947void os::check_dump_limit(char* buffer, size_t buffsz) {
948  bool status = true;
949  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
950    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
951    status = false;
952  }
953
954#ifndef ASSERT
955  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
956    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
957    status = false;
958  }
959#endif
960
961  if (status) {
962    const char* cwd = get_current_directory(NULL, 0);
963    int pid = current_process_id();
964    if (cwd != NULL) {
965      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
966    } else {
967      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
968    }
969
970    if (dumpFile == NULL &&
971       (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
972                 == INVALID_HANDLE_VALUE) {
973      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
974      status = false;
975    }
976  }
977  VMError::record_coredump_status(buffer, status);
978}
979
980void os::abort(bool dump_core, void* siginfo, const void* context) {
981  HINSTANCE dbghelp;
982  EXCEPTION_POINTERS ep;
983  MINIDUMP_EXCEPTION_INFORMATION mei;
984  MINIDUMP_EXCEPTION_INFORMATION* pmei;
985
986  HANDLE hProcess = GetCurrentProcess();
987  DWORD processId = GetCurrentProcessId();
988  MINIDUMP_TYPE dumpType;
989
990  shutdown();
991  if (!dump_core || dumpFile == NULL) {
992    if (dumpFile != NULL) {
993      CloseHandle(dumpFile);
994    }
995    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
996  }
997
998  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
999
1000  if (dbghelp == NULL) {
1001    jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1002    CloseHandle(dumpFile);
1003    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1004  }
1005
1006  _MiniDumpWriteDump =
1007      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1008                                    PMINIDUMP_EXCEPTION_INFORMATION,
1009                                    PMINIDUMP_USER_STREAM_INFORMATION,
1010                                    PMINIDUMP_CALLBACK_INFORMATION),
1011                                    GetProcAddress(dbghelp,
1012                                    "MiniDumpWriteDump"));
1013
1014  if (_MiniDumpWriteDump == NULL) {
1015    jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1016    CloseHandle(dumpFile);
1017    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1018  }
1019
1020  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1021    MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1022
1023  if (siginfo != NULL && context != NULL) {
1024    ep.ContextRecord = (PCONTEXT) context;
1025    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1026
1027    mei.ThreadId = GetCurrentThreadId();
1028    mei.ExceptionPointers = &ep;
1029    pmei = &mei;
1030  } else {
1031    pmei = NULL;
1032  }
1033
1034  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1035  // the dump types we really want. If the first call fails, let's fall back to just using MiniDumpWithFullMemory.
1036  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1037      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1038    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1039  }
1040  CloseHandle(dumpFile);
1041  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1042}
1043
1044// Die immediately, no exit hook, no abort hook, no cleanup.
1045void os::die() {
1046  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1047}
1048
1049// Directory routines copied from src/win32/native/java/io/dirent_md.c
1050//  * dirent_md.c       1.15 00/02/02
1051//
1052// The declarations for DIR and struct dirent are in jvm_win32.h.
1053
1054// Caller must have already run dirname through JVM_NativePath, which removes
1055// duplicate slashes and converts all instances of '/' into '\\'.
1056
1057DIR * os::opendir(const char *dirname) {
1058  assert(dirname != NULL, "just checking");   // hotspot change
1059  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1060  DWORD fattr;                                // hotspot change
1061  char alt_dirname[4] = { 0, 0, 0, 0 };
1062
1063  if (dirp == 0) {
1064    errno = ENOMEM;
1065    return 0;
1066  }
1067
1068  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1069  // as a directory in FindFirstFile().  We detect this case here and
1070  // prepend the current drive name.
1071  //
1072  if (dirname[1] == '\0' && dirname[0] == '\\') {
1073    alt_dirname[0] = _getdrive() + 'A' - 1;
1074    alt_dirname[1] = ':';
1075    alt_dirname[2] = '\\';
1076    alt_dirname[3] = '\0';
1077    dirname = alt_dirname;
1078  }
1079
1080  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1081  if (dirp->path == 0) {
1082    free(dirp);
1083    errno = ENOMEM;
1084    return 0;
1085  }
1086  strcpy(dirp->path, dirname);
1087
1088  fattr = GetFileAttributes(dirp->path);
1089  if (fattr == 0xffffffff) {
1090    free(dirp->path);
1091    free(dirp);
1092    errno = ENOENT;
1093    return 0;
1094  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1095    free(dirp->path);
1096    free(dirp);
1097    errno = ENOTDIR;
1098    return 0;
1099  }
1100
1101  // Append "*.*", or possibly "\\*.*", to path
1102  if (dirp->path[1] == ':' &&
1103      (dirp->path[2] == '\0' ||
1104      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1105    // No '\\' needed for cases like "Z:" or "Z:\"
1106    strcat(dirp->path, "*.*");
1107  } else {
1108    strcat(dirp->path, "\\*.*");
1109  }
1110
1111  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1112  if (dirp->handle == INVALID_HANDLE_VALUE) {
1113    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1114      free(dirp->path);
1115      free(dirp);
1116      errno = EACCES;
1117      return 0;
1118    }
1119  }
1120  return dirp;
1121}
1122
1123// parameter dbuf unused on Windows
1124struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1125  assert(dirp != NULL, "just checking");      // hotspot change
1126  if (dirp->handle == INVALID_HANDLE_VALUE) {
1127    return 0;
1128  }
1129
1130  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1131
1132  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1133    if (GetLastError() == ERROR_INVALID_HANDLE) {
1134      errno = EBADF;
1135      return 0;
1136    }
1137    FindClose(dirp->handle);
1138    dirp->handle = INVALID_HANDLE_VALUE;
1139  }
1140
1141  return &dirp->dirent;
1142}
1143
1144int os::closedir(DIR *dirp) {
1145  assert(dirp != NULL, "just checking");      // hotspot change
1146  if (dirp->handle != INVALID_HANDLE_VALUE) {
1147    if (!FindClose(dirp->handle)) {
1148      errno = EBADF;
1149      return -1;
1150    }
1151    dirp->handle = INVALID_HANDLE_VALUE;
1152  }
1153  free(dirp->path);
1154  free(dirp);
1155  return 0;
1156}
1157
1158// This must be hard coded because it's the system's temporary
1159// directory, not the java application's temp directory, a la java.io.tmpdir.
1160const char* os::get_temp_directory() {
1161  static char path_buf[MAX_PATH];
1162  if (GetTempPath(MAX_PATH, path_buf) > 0) {
1163    return path_buf;
1164  } else {
1165    path_buf[0] = '\0';
1166    return path_buf;
1167  }
1168}
1169
1170static bool file_exists(const char* filename) {
1171  if (filename == NULL || strlen(filename) == 0) {
1172    return false;
1173  }
1174  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1175}
1176
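// Builds a platform-specific library name, <fname>.dll, optionally prefixed by pname.
// If pname is a ';'-separated list of directories, each element is tried in turn and
// the first candidate that exists on disk wins.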
1177bool os::dll_build_name(char *buffer, size_t buflen,
1178                        const char* pname, const char* fname) {
1179  bool retval = false;
1180  const size_t pnamelen = pname ? strlen(pname) : 0;
1181  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1182
1183  // Return error on buffer overflow.
1184  if (pnamelen + strlen(fname) + 10 > buflen) {
1185    return retval;
1186  }
1187
1188  if (pnamelen == 0) {
1189    jio_snprintf(buffer, buflen, "%s.dll", fname);
1190    retval = true;
1191  } else if (c == ':' || c == '\\') {
1192    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1193    retval = true;
1194  } else if (strchr(pname, *os::path_separator()) != NULL) {
1195    int n;
1196    char** pelements = split_path(pname, &n);
1197    if (pelements == NULL) {
1198      return false;
1199    }
1200    for (int i = 0; i < n; i++) {
1201      char* path = pelements[i];
1202      // Really shouldn't be NULL, but check can't hurt
1203      size_t plen = (path == NULL) ? 0 : strlen(path);
1204      if (plen == 0) {
1205        continue; // skip the empty path values
1206      }
1207      const char lastchar = path[plen - 1];
1208      if (lastchar == ':' || lastchar == '\\') {
1209        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1210      } else {
1211        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1212      }
1213      if (file_exists(buffer)) {
1214        retval = true;
1215        break;
1216      }
1217    }
1218    // release the storage
1219    for (int i = 0; i < n; i++) {
1220      if (pelements[i] != NULL) {
1221        FREE_C_HEAP_ARRAY(char, pelements[i]);
1222      }
1223    }
1224    if (pelements != NULL) {
1225      FREE_C_HEAP_ARRAY(char*, pelements);
1226    }
1227  } else {
1228    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1229    retval = true;
1230  }
1231  return retval;
1232}
1233
1234// Needs to be in the os-specific directory because Windows requires another
1235// header file <direct.h>
1236const char* os::get_current_directory(char *buf, size_t buflen) {
1237  int n = static_cast<int>(buflen);
1238  if (buflen > INT_MAX)  n = INT_MAX;
1239  return _getcwd(buf, n);
1240}
1241
1242//-----------------------------------------------------------
1243// Helper functions for fatal error handler
1244#ifdef _WIN64
1245// Helper routine which returns true if the address is
1246// within the NTDLL address space.
1247//
1248static bool _addr_in_ntdll(address addr) {
1249  HMODULE hmod;
1250  MODULEINFO minfo;
1251
1252  hmod = GetModuleHandle("NTDLL.DLL");
1253  if (hmod == NULL) return false;
1254  if (!GetModuleInformation(GetCurrentProcess(), hmod,
1255                                          &minfo, sizeof(MODULEINFO))) {
1256    return false;
1257  }
1258
1259  if ((addr >= minfo.lpBaseOfDll) &&
1260      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1261    return true;
1262  } else {
1263    return false;
1264  }
1265}
1266#endif
1267
1268struct _modinfo {
1269  address addr;
1270  char*   full_path;   // point to a char buffer
1271  int     buflen;      // size of the buffer
1272  address base_addr;
1273};
1274
1275static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1276                                  address top_address, void * param) {
1277  struct _modinfo *pmod = (struct _modinfo *)param;
1278  if (!pmod) return -1;
1279
1280  if (base_addr   <= pmod->addr &&
1281      top_address > pmod->addr) {
1282    // if a buffer is provided, copy path name to the buffer
1283    if (pmod->full_path) {
1284      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1285    }
1286    pmod->base_addr = base_addr;
1287    return 1;
1288  }
1289  return 0;
1290}
1291
1292bool os::dll_address_to_library_name(address addr, char* buf,
1293                                     int buflen, int* offset) {
1294  // buf is not optional, but offset is optional
1295  assert(buf != NULL, "sanity check");
1296
1297// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1298//       return the full path to the DLL file, sometimes it returns path
1299//       to the corresponding PDB file (debug info); sometimes it only
1300//       returns partial path, which makes life painful.
1301
1302  struct _modinfo mi;
1303  mi.addr      = addr;
1304  mi.full_path = buf;
1305  mi.buflen    = buflen;
1306  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1307    // buf already contains path name
1308    if (offset) *offset = addr - mi.base_addr;
1309    return true;
1310  }
1311
1312  buf[0] = '\0';
1313  if (offset) *offset = -1;
1314  return false;
1315}
1316
1317bool os::dll_address_to_function_name(address addr, char *buf,
1318                                      int buflen, int *offset,
1319                                      bool demangle) {
1320  // buf is not optional, but offset is optional
1321  assert(buf != NULL, "sanity check");
1322
1323  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1324    return true;
1325  }
1326  if (offset != NULL)  *offset  = -1;
1327  buf[0] = '\0';
1328  return false;
1329}
1330
1331// save the start and end address of jvm.dll into param[0] and param[1]
1332static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1333                           address top_address, void * param) {
1334  if (!param) return -1;
1335
1336  if (base_addr   <= (address)_locate_jvm_dll &&
1337      top_address > (address)_locate_jvm_dll) {
1338    ((address*)param)[0] = base_addr;
1339    ((address*)param)[1] = top_address;
1340    return 1;
1341  }
1342  return 0;
1343}
1344
1345address vm_lib_location[2];    // start and end address of jvm.dll
1346
1347// check if addr is inside jvm.dll
1348bool os::address_is_in_vm(address addr) {
1349  if (!vm_lib_location[0] || !vm_lib_location[1]) {
1350    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1351      assert(false, "Can't find jvm module.");
1352      return false;
1353    }
1354  }
1355
1356  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1357}
1358
1359// print module info; param is outputStream*
1360static int _print_module(const char* fname, address base_address,
1361                         address top_address, void* param) {
1362  if (!param) return -1;
1363
1364  outputStream* st = (outputStream*)param;
1365
1366  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1367  return 0;
1368}
1369
1370// Loads a .dll/.so and,
1371// in case of error, checks whether the .dll/.so was built for the
1372// same architecture that Hotspot is running on.
1373void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1374  void * result = LoadLibrary(name);
1375  if (result != NULL) {
1376    return result;
1377  }
1378
1379  DWORD errcode = GetLastError();
1380  if (errcode == ERROR_MOD_NOT_FOUND) {
1381    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1382    ebuf[ebuflen - 1] = '\0';
1383    return NULL;
1384  }
1385
1386  // Parsing the dll below.
1387  // If we can read the dll info and find that the dll was built
1388  // for an architecture other than the one Hotspot is running on,
1389  // then print "DLL was built for a different architecture" to the buffer;
1390  // otherwise call os::lasterror to obtain the system error message.
1391
1392  // Read system error message into ebuf
1393  // It may or may not be overwritten below (in the for loop and just above)
1394  lasterror(ebuf, (size_t) ebuflen);
1395  ebuf[ebuflen - 1] = '\0';
1396  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1397  if (fd < 0) {
1398    return NULL;
1399  }
1400
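  // PE layout read below: the 32-bit value at file offset 0x3c (IMAGE_FILE_PTR_TO_SIGNATURE)
  // gives the offset of the 4-byte "PE\0\0" signature; the COFF file header follows the
  // signature, and its first 16-bit field identifies the machine the image was built for.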
1401  uint32_t signature_offset;
1402  uint16_t lib_arch = 0;
1403  bool failed_to_get_lib_arch =
1404    ( // Go to position 3c in the dll
1405     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1406     ||
1407     // Read location of signature
1408     (sizeof(signature_offset) !=
1409     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1410     ||
1411     // Go to COFF File Header in dll
1412     // that is located after "signature" (4 bytes long)
1413     (os::seek_to_file_offset(fd,
1414     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1415     ||
1416     // Read field that contains code of architecture
1417     // that dll was built for
1418     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1419    );
1420
1421  ::close(fd);
1422  if (failed_to_get_lib_arch) {
1423    // file i/o error - report os::lasterror(...) msg
1424    return NULL;
1425  }
1426
1427  typedef struct {
1428    uint16_t arch_code;
1429    char* arch_name;
1430  } arch_t;
1431
1432  static const arch_t arch_array[] = {
1433    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1434    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1435    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1436  };
1437#if   (defined _M_IA64)
1438  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1439#elif (defined _M_AMD64)
1440  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1441#elif (defined _M_IX86)
1442  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1443#else
1444  #error Method os::dll_load requires that one of following \
1445         is defined :_M_IA64,_M_AMD64 or _M_IX86
1446#endif
1447
1448
1449  // Obtain strings for the printf operation:
1450  // lib_arch_str shall contain a string describing what platform this .dll was built for,
1451  // running_arch_str shall contain a string describing what platform Hotspot was built for.
1452  char *running_arch_str = NULL, *lib_arch_str = NULL;
1453  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1454    if (lib_arch == arch_array[i].arch_code) {
1455      lib_arch_str = arch_array[i].arch_name;
1456    }
1457    if (running_arch == arch_array[i].arch_code) {
1458      running_arch_str = arch_array[i].arch_name;
1459    }
1460  }
1461
1462  assert(running_arch_str,
1463         "Didn't find running architecture code in arch_array");
1464
1465  // If the architecture is right
1466  // but some other error took place - report os::lasterror(...) msg
1467  if (lib_arch == running_arch) {
1468    return NULL;
1469  }
1470
1471  if (lib_arch_str != NULL) {
1472    ::_snprintf(ebuf, ebuflen - 1,
1473                "Can't load %s-bit .dll on a %s-bit platform",
1474                lib_arch_str, running_arch_str);
1475  } else {
1476    // don't know what architecture this dll was built for
1477    ::_snprintf(ebuf, ebuflen - 1,
1478                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1479                lib_arch, running_arch_str);
1480  }
1481
1482  return NULL;
1483}
1484
1485void os::print_dll_info(outputStream *st) {
1486  st->print_cr("Dynamic libraries:");
1487  get_loaded_modules_info(_print_module, (void *)st);
1488}
1489
1490int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1491  HANDLE   hProcess;
1492
1493# define MAX_NUM_MODULES 128
1494  HMODULE     modules[MAX_NUM_MODULES];
1495  static char filename[MAX_PATH];
1496  int         result = 0;
1497
1498  int pid = os::current_process_id();
1499  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1500                         FALSE, pid);
1501  if (hProcess == NULL) return 0;
1502
1503  DWORD size_needed;
1504  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1505    CloseHandle(hProcess);
1506    return 0;
1507  }
1508
1509  // number of modules that are currently loaded
1510  int num_modules = size_needed / sizeof(HMODULE);
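  // size_needed is the buffer size required for all module handles and may exceed our
  // fixed-size 'modules' array, hence the MIN2 bound in the loop below.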
1511
1512  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1513    // Get Full pathname:
1514    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1515      filename[0] = '\0';
1516    }
1517
1518    MODULEINFO modinfo;
1519    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1520      modinfo.lpBaseOfDll = NULL;
1521      modinfo.SizeOfImage = 0;
1522    }
1523
1524    // Invoke callback function
1525    result = callback(filename, (address)modinfo.lpBaseOfDll,
1526                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1527    if (result) break;
1528  }
1529
1530  CloseHandle(hProcess);
1531  return result;
1532}
1533
1534#ifndef PRODUCT
1535bool os::get_host_name(char* buf, size_t buflen) {
1536  DWORD size = (DWORD)buflen;
1537  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1538}
1539#endif // PRODUCT
1540
1541void os::get_summary_os_info(char* buf, size_t buflen) {
1542  stringStream sst(buf, buflen);
1543  os::win32::print_windows_version(&sst);
1544  // chop off newline character
1545  char* nl = strchr(buf, '\n');
1546  if (nl != NULL) *nl = '\0';
1547}
1548
1549int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1550  int ret = vsnprintf(buf, len, fmt, args);
1551  // Get the correct buffer size if buf is too small
1552  if (ret < 0) {
1553    return _vscprintf(fmt, args);
1554  }
1555  return ret;
1556}
1557
1558void os::print_os_info_brief(outputStream* st) {
1559  os::print_os_info(st);
1560}
1561
1562void os::print_os_info(outputStream* st) {
1563#ifdef ASSERT
1564  char buffer[1024];
1565  st->print("HostName: ");
1566  if (get_host_name(buffer, sizeof(buffer))) {
1567    st->print("%s ", buffer);
1568  } else {
1569    st->print("N/A ");
1570  }
1571#endif
1572  st->print("OS:");
1573  os::win32::print_windows_version(st);
1574}
1575
1576void os::win32::print_windows_version(outputStream* st) {
1577  OSVERSIONINFOEX osvi;
1578  VS_FIXEDFILEINFO *file_info;
1579  TCHAR kernel32_path[MAX_PATH];
1580  UINT len, ret;
1581
1582  // Use the GetVersionEx information to see if we're on a server or
1583  // workstation edition of Windows. Starting with Windows 8.1 we can't
1584  // trust the OS version information returned by this API.
1585  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1586  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1587  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1588    st->print_cr("Call to GetVersionEx failed");
1589    return;
1590  }
1591  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1592
1593  // Get the full path to \Windows\System32\kernel32.dll and use that for
1594  // determining what version of Windows we're running on.
1595  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1596  ret = GetSystemDirectory(kernel32_path, len);
1597  if (ret == 0 || ret > len) {
1598    st->print_cr("Call to GetSystemDirectory failed");
1599    return;
1600  }
1601  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1602
1603  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1604  if (version_size == 0) {
1605    st->print_cr("Call to GetFileVersionInfoSize failed");
1606    return;
1607  }
1608
1609  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1610  if (version_info == NULL) {
1611    st->print_cr("Failed to allocate version_info");
1612    return;
1613  }
1614
1615  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1616    os::free(version_info);
1617    st->print_cr("Call to GetFileVersionInfo failed");
1618    return;
1619  }
1620
1621  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1622    os::free(version_info);
1623    st->print_cr("Call to VerQueryValue failed");
1624    return;
1625  }
1626
1627  int major_version = HIWORD(file_info->dwProductVersionMS);
1628  int minor_version = LOWORD(file_info->dwProductVersionMS);
1629  int build_number = HIWORD(file_info->dwProductVersionLS);
1630  int build_minor = LOWORD(file_info->dwProductVersionLS);
1631  int os_vers = major_version * 1000 + minor_version;
1632  os::free(version_info);
1633
1634  st->print(" Windows ");
1635  switch (os_vers) {
1636
1637  case 6000:
1638    if (is_workstation) {
1639      st->print("Vista");
1640    } else {
1641      st->print("Server 2008");
1642    }
1643    break;
1644
1645  case 6001:
1646    if (is_workstation) {
1647      st->print("7");
1648    } else {
1649      st->print("Server 2008 R2");
1650    }
1651    break;
1652
1653  case 6002:
1654    if (is_workstation) {
1655      st->print("8");
1656    } else {
1657      st->print("Server 2012");
1658    }
1659    break;
1660
1661  case 6003:
1662    if (is_workstation) {
1663      st->print("8.1");
1664    } else {
1665      st->print("Server 2012 R2");
1666    }
1667    break;
1668
1669  case 10000:
1670    if (is_workstation) {
1671      st->print("10");
1672    } else {
1673      // The server version name of Windows 10 is not known at this time
1674      st->print("%d.%d", major_version, minor_version);
1675    }
1676    break;
1677
1678  default:
1679    // Unrecognized windows, print out its major and minor versions
1680    st->print("%d.%d", major_version, minor_version);
1681    break;
1682  }
1683
1684  // Retrieve SYSTEM_INFO from the GetNativeSystemInfo call so that we can
1685  // find out whether we are running on a 64-bit processor or not
1686  SYSTEM_INFO si;
1687  ZeroMemory(&si, sizeof(SYSTEM_INFO));
1688  GetNativeSystemInfo(&si);
1689  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1690    st->print(" , 64 bit");
1691  }
1692
1693  st->print(" Build %d", build_number);
1694  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1695  st->cr();
1696}
1697
1698void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1699  // Nothing to do for now.
1700}
1701
1702void os::get_summary_cpu_info(char* buf, size_t buflen) {
1703  HKEY key;
1704  DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1705               "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1706  if (status == ERROR_SUCCESS) {
1707    DWORD size = (DWORD)buflen;
1708    status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1709    if (status != ERROR_SUCCESS) {
1710        strncpy(buf, "## __CPU__", buflen);
1711    }
1712    RegCloseKey(key);
1713  } else {
1714    // Put a generic CPU description into the buffer and return.
1715    strncpy(buf, "## __CPU__", buflen);
1716  }
1717}
1718
1719void os::print_memory_info(outputStream* st) {
1720  st->print("Memory:");
1721  st->print(" %dk page", os::vm_page_size()>>10);
1722
1723  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
1724  // value if total memory is larger than 4GB
1725  MEMORYSTATUSEX ms;
1726  ms.dwLength = sizeof(ms);
1727  GlobalMemoryStatusEx(&ms);
1728
1729  st->print(", physical %uk", os::physical_memory() >> 10);
1730  st->print("(%uk free)", os::available_memory() >> 10);
1731
1732  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
1733  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
1734  st->cr();
1735}
1736
1737void os::print_siginfo(outputStream *st, const void* siginfo) {
1738  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1739  st->print("siginfo:");
1740
1741  char tmp[64];
1742  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1743    strcpy(tmp, "EXCEPTION_??");
1744  }
1745  st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1746
1747  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1748       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1749       er->NumberParameters >= 2) {
1750    switch (er->ExceptionInformation[0]) {
1751    case 0: st->print(", reading address"); break;
1752    case 1: st->print(", writing address"); break;
1753    case 8: st->print(", data execution prevention violation at address"); break;
1754    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1755                       er->ExceptionInformation[0]);
1756    }
1757    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1758  } else {
1759    int num = er->NumberParameters;
1760    if (num > 0) {
1761      st->print(", ExceptionInformation=");
1762      for (int i = 0; i < num; i++) {
1763        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1764      }
1765    }
1766  }
1767  st->cr();
1768}
1769
1770void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1771  // do nothing
1772}
1773
1774static char saved_jvm_path[MAX_PATH] = {0};
1775
1776// Find the full path to the current module, jvm.dll
1777void os::jvm_path(char *buf, jint buflen) {
1778  // Error checking.
1779  if (buflen < MAX_PATH) {
1780    assert(false, "must use a large-enough buffer");
1781    buf[0] = '\0';
1782    return;
1783  }
1784  // Lazily resolve the path to the current module.
1785  if (saved_jvm_path[0] != 0) {
1786    strcpy(buf, saved_jvm_path);
1787    return;
1788  }
1789
1790  buf[0] = '\0';
1791  if (Arguments::sun_java_launcher_is_altjvm()) {
1792    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1793    // for a JAVA_HOME environment variable and fix up the path so it
1794    // looks like jvm.dll is installed there (append a fake suffix
1795    // hotspot/jvm.dll).
1796    char* java_home_var = ::getenv("JAVA_HOME");
1797    if (java_home_var != NULL && java_home_var[0] != 0 &&
1798        strlen(java_home_var) < (size_t)buflen) {
1799      strncpy(buf, java_home_var, buflen);
1800
1801      // determine if this is a legacy image or modules image
1802      // modules image doesn't have "jre" subdirectory
1803      size_t len = strlen(buf);
1804      char* jrebin_p = buf + len;
1805      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1806      if (0 != _access(buf, 0)) {
1807        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1808      }
1809      len = strlen(buf);
1810      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1811    }
1812  }
1813
1814  if (buf[0] == '\0') {
1815    GetModuleFileName(vm_lib_handle, buf, buflen);
1816  }
1817  strncpy(saved_jvm_path, buf, MAX_PATH);
1818  saved_jvm_path[MAX_PATH - 1] = '\0';
1819}
1820
1821
1822void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1823#ifndef _WIN64
1824  st->print("_");
1825#endif
1826}
1827
1828
1829void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1830#ifndef _WIN64
1831  st->print("@%d", args_size  * sizeof(int));
1832#endif
1833}
1834
1835// This method is a copy of JDK's sysGetLastErrorString
1836// from src/windows/hpi/src/system_md.c
1837
1838size_t os::lasterror(char* buf, size_t len) {
1839  DWORD errval;
1840
1841  if ((errval = GetLastError()) != 0) {
1842    // DOS error
1843    size_t n = (size_t)FormatMessage(
1844                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1845                                     NULL,
1846                                     errval,
1847                                     0,
1848                                     buf,
1849                                     (DWORD)len,
1850                                     NULL);
1851    if (n > 3) {
1852      // Drop final '.', CR, LF
1853      if (buf[n - 1] == '\n') n--;
1854      if (buf[n - 1] == '\r') n--;
1855      if (buf[n - 1] == '.') n--;
1856      buf[n] = '\0';
1857    }
1858    return n;
1859  }
1860
1861  if (errno != 0) {
1862    // C runtime error that has no corresponding DOS error code
1863    const char* s = strerror(errno);
1864    size_t n = strlen(s);
1865    if (n >= len) n = len - 1;
1866    strncpy(buf, s, n);
1867    buf[n] = '\0';
1868    return n;
1869  }
1870
1871  return 0;
1872}
1873
1874int os::get_last_error() {
1875  DWORD error = GetLastError();
1876  if (error == 0) {
1877    error = errno;
1878  }
1879  return (int)error;
1880}
1881
1882WindowsSemaphore::WindowsSemaphore(uint value) {
1883  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1884
1885  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1886}
1887
1888WindowsSemaphore::~WindowsSemaphore() {
1889  ::CloseHandle(_semaphore);
1890}
1891
1892void WindowsSemaphore::signal(uint count) {
1893  if (count > 0) {
1894    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1895
1896    assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1897  }
1898}
1899
1900void WindowsSemaphore::wait() {
1901  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1902  assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1903  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1904}
1905
1906// sun.misc.Signal
1907// NOTE that this is a workaround for an apparent kernel bug where if
1908// a signal handler for SIGBREAK is installed then that signal handler
1909// takes priority over the console control handler for CTRL_CLOSE_EVENT.
1910// See bug 4416763.
1911static void (*sigbreakHandler)(int) = NULL;
1912
1913static void UserHandler(int sig, void *siginfo, void *context) {
1914  os::signal_notify(sig);
1915  // We need to reinstate the signal handler each time...
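      // (the Microsoft C runtime resets the disposition to the default when a signal is delivered)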
1916  os::signal(sig, (void*)UserHandler);
1917}
1918
1919void* os::user_handler() {
1920  return (void*) UserHandler;
1921}
1922
1923void* os::signal(int signal_number, void* handler) {
1924  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1925    void (*oldHandler)(int) = sigbreakHandler;
1926    sigbreakHandler = (void (*)(int)) handler;
1927    return (void*) oldHandler;
1928  } else {
1929    return (void*)::signal(signal_number, (void (*)(int))handler);
1930  }
1931}
1932
1933void os::signal_raise(int signal_number) {
1934  raise(signal_number);
1935}
1936
1937// The Win32 C runtime library maps all console control events other than ^C
1938// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
1939// logoff, and shutdown events.  We therefore install our own console handler
1940// that raises SIGTERM for the latter cases.
1941//
1942static BOOL WINAPI consoleHandler(DWORD event) {
1943  switch (event) {
1944  case CTRL_C_EVENT:
1945    if (is_error_reported()) {
1946      // Ctrl-C is pressed during error reporting, likely because the error
1947      // handler fails to abort. Let VM die immediately.
1948      os::die();
1949    }
1950
1951    os::signal_raise(SIGINT);
1952    return TRUE;
1953    break;
1954  case CTRL_BREAK_EVENT:
1955    if (sigbreakHandler != NULL) {
1956      (*sigbreakHandler)(SIGBREAK);
1957    }
1958    return TRUE;
1959    break;
1960  case CTRL_LOGOFF_EVENT: {
1961    // Don't terminate JVM if it is running in a non-interactive session,
1962    // such as a service process.
1963    USEROBJECTFLAGS flags;
1964    HANDLE handle = GetProcessWindowStation();
1965    if (handle != NULL &&
1966        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
1967        sizeof(USEROBJECTFLAGS), NULL)) {
1968      // If it is a non-interactive session, let the next handler deal
1969      // with it.
1970      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
1971        return FALSE;
1972      }
1973    }
1974  }
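      // Otherwise fall through: an interactive logoff is handled like close/shutdown.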
1975  case CTRL_CLOSE_EVENT:
1976  case CTRL_SHUTDOWN_EVENT:
1977    os::signal_raise(SIGTERM);
1978    return TRUE;
1979    break;
1980  default:
1981    break;
1982  }
1983  return FALSE;
1984}
1985
1986  // The following code was moved from os.cpp to make this
1987  // code platform specific, which it is by its very nature.
1988
1989// Return maximum OS signal used + 1 for internal use only
1990// Used as exit signal for signal_thread
1991int os::sigexitnum_pd() {
1992  return NSIG;
1993}
1994
1995// a counter for each possible signal value, including signal_thread exit signal
1996static volatile jint pending_signals[NSIG+1] = { 0 };
1997static HANDLE sig_sem = NULL;
1998
1999void os::signal_init_pd() {
2000  // Initialize signal structures
2001  memset((void*)pending_signals, 0, sizeof(pending_signals));
2002
2003  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2004
2005  // Programs embedding the VM do not want it to attempt to receive
2006  // events like CTRL_LOGOFF_EVENT, which are used to implement the
2007  // shutdown hooks mechanism introduced in 1.3.  For example, when
2008  // the VM is run as part of a Windows NT service (e.g., a servlet
2009  // engine in a web server), the correct behavior is for any console
2010  // control handler to return FALSE, not TRUE, because the OS's
2011  // "final" handler for such events allows the process to continue if
2012  // it is a service (while terminating it if it is not a service).
2013  // To make this behavior uniform and the mechanism simpler, we
2014  // completely disable the VM's usage of these console events if -Xrs
2015  // (=ReduceSignalUsage) is specified.  This means, for example, that
2016  // the CTRL-BREAK thread dump mechanism is also disabled in this
2017  // case.  See bugs 4323062, 4345157, and related bugs.
2018
2019  if (!ReduceSignalUsage) {
2020    // Add a CTRL-C handler
2021    SetConsoleCtrlHandler(consoleHandler, TRUE);
2022  }
2023}
2024
2025void os::signal_notify(int signal_number) {
2026  BOOL ret;
2027  if (sig_sem != NULL) {
2028    Atomic::inc(&pending_signals[signal_number]);
2029    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2030    assert(ret != 0, "ReleaseSemaphore() failed");
2031  }
2032}
2033
2034static int check_pending_signals(bool wait_for_signal) {
2035  DWORD ret;
2036  while (true) {
2037    for (int i = 0; i < NSIG + 1; i++) {
2038      jint n = pending_signals[i];
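          // Atomically claim one pending occurrence of signal i; the cmpxchg
          // fails and we re-check if another thread consumed it first.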
2039      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2040        return i;
2041      }
2042    }
2043    if (!wait_for_signal) {
2044      return -1;
2045    }
2046
2047    JavaThread *thread = JavaThread::current();
2048
2049    ThreadBlockInVM tbivm(thread);
2050
2051    bool threadIsSuspended;
2052    do {
2053      thread->set_suspend_equivalent();
2054      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2055      ret = ::WaitForSingleObject(sig_sem, INFINITE);
2056      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2057
2058      // were we externally suspended while we were waiting?
2059      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2060      if (threadIsSuspended) {
2061        // The semaphore has been incremented, but while we were waiting
2062        // another thread suspended us. We don't want to continue running
2063        // while suspended because that would surprise the thread that
2064        // suspended us.
2065        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2066        assert(ret != 0, "ReleaseSemaphore() failed");
2067
2068        thread->java_suspend_self();
2069      }
2070    } while (threadIsSuspended);
2071  }
2072}
2073
2074int os::signal_lookup() {
2075  return check_pending_signals(false);
2076}
2077
2078int os::signal_wait() {
2079  return check_pending_signals(true);
2080}
2081
2082// Implicit OS exception handling
2083
2084LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2085                      address handler) {
2086    JavaThread* thread = (JavaThread*) Thread::current_or_null();
2087  // Save pc in thread
2088#ifdef _M_IA64
2089  // Do not blow up if no thread info available.
2090  if (thread) {
2091    // Saving PRECISE pc (with slot information) in thread.
2092    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2093    // Convert precise PC into "Unix" format
2094    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2095    thread->set_saved_exception_pc((address)precise_pc);
2096  }
2097  // Set pc to handler
2098  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2099  // Clear out psr.ri (= Restart Instruction) in order to continue
2100  // at the beginning of the target bundle.
2101  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2102  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2103#else
2104  #ifdef _M_AMD64
2105  // Do not blow up if no thread info available.
2106  if (thread) {
2107    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2108  }
2109  // Set pc to handler
2110  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2111  #else
2112  // Do not blow up if no thread info available.
2113  if (thread) {
2114    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2115  }
2116  // Set pc to handler
2117  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2118  #endif
2119#endif
2120
2121  // Continue the execution
2122  return EXCEPTION_CONTINUE_EXECUTION;
2123}
2124
2125
2126// Used for PostMortemDump
2127extern "C" void safepoints();
2128extern "C" void find(int x);
2129extern "C" void events();
2130
2131// According to Windows API documentation, an illegal instruction sequence should generate
2132  // the 0xC000001C exception code. However, real-world experience shows that occasionally
2133// the execution of an illegal instruction can generate the exception code 0xC000001E. This
2134// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2135
2136#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2137
2138// From "Execution Protection in the Windows Operating System" draft 0.35
2139// Once a system header becomes available, the "real" define should be
2140// included or copied here.
2141#define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2142
2143// Handle NAT Bit consumption on IA64.
2144#ifdef _M_IA64
2145  #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2146#endif
2147
2148// Windows Vista/2008 heap corruption check
2149#define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2150
2151#define def_excpt(val) #val, val
2152
2153struct siglabel {
2154  char *name;
2155  int   number;
2156};
2157
2158// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2159// C++ compiler contain this error code. Because this is a compiler-generated
2160// error, the code is not listed in the Win32 API header files.
2161// The code is actually a cryptic mnemonic device, with the initial "E"
2162// standing for "exception" and the final 3 bytes (0x6D7363) representing the
2163// ASCII values of "msc".
2164
2165#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2166
2167
2168struct siglabel exceptlabels[] = {
2169    def_excpt(EXCEPTION_ACCESS_VIOLATION),
2170    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2171    def_excpt(EXCEPTION_BREAKPOINT),
2172    def_excpt(EXCEPTION_SINGLE_STEP),
2173    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2174    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2175    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2176    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2177    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2178    def_excpt(EXCEPTION_FLT_OVERFLOW),
2179    def_excpt(EXCEPTION_FLT_STACK_CHECK),
2180    def_excpt(EXCEPTION_FLT_UNDERFLOW),
2181    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2182    def_excpt(EXCEPTION_INT_OVERFLOW),
2183    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2184    def_excpt(EXCEPTION_IN_PAGE_ERROR),
2185    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2186    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2187    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2188    def_excpt(EXCEPTION_STACK_OVERFLOW),
2189    def_excpt(EXCEPTION_INVALID_DISPOSITION),
2190    def_excpt(EXCEPTION_GUARD_PAGE),
2191    def_excpt(EXCEPTION_INVALID_HANDLE),
2192    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2193    def_excpt(EXCEPTION_HEAP_CORRUPTION),
2194#ifdef _M_IA64
2195    def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
2196#endif
2197    NULL, 0
2198};
2199
2200const char* os::exception_name(int exception_code, char *buf, size_t size) {
2201  for (int i = 0; exceptlabels[i].name != NULL; i++) {
2202    if (exceptlabels[i].number == exception_code) {
2203      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2204      return buf;
2205    }
2206  }
2207
2208  return NULL;
2209}
2210
2211//-----------------------------------------------------------------------------
2212LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2213  // handle exception caused by idiv; should only happen for -MinInt/-1
2214  // (division by zero is handled explicitly)
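      // (min_jint / -1 overflows; the expected result is min_jint with remainder 0)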
2215#ifdef _M_IA64
2216  assert(0, "Fix Handle_IDiv_Exception");
2217#else
2218  #ifdef  _M_AMD64
2219  PCONTEXT ctx = exceptionInfo->ContextRecord;
2220  address pc = (address)ctx->Rip;
2221  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2222  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2223  if (pc[0] == 0xF7) {
2224    // set correct result values and continue after idiv instruction
2225    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2226  } else {
2227    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2228  }
2229  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
2230  // This is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of the
2231  // idiv opcode (0xF7).
2232  ctx->Rdx = (DWORD)0;             // remainder
2233  // Continue the execution
2234  #else
2235  PCONTEXT ctx = exceptionInfo->ContextRecord;
2236  address pc = (address)ctx->Eip;
2237  assert(pc[0] == 0xF7, "not an idiv opcode");
2238  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2239  assert(ctx->Eax == min_jint, "unexpected idiv exception");
2240  // set correct result values and continue after idiv instruction
2241  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2242  ctx->Eax = (DWORD)min_jint;      // result
2243  ctx->Edx = (DWORD)0;             // remainder
2244  // Continue the execution
2245  #endif
2246#endif
2247  return EXCEPTION_CONTINUE_EXECUTION;
2248}
2249
2250//-----------------------------------------------------------------------------
2251LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2252  PCONTEXT ctx = exceptionInfo->ContextRecord;
2253#ifndef  _WIN64
2254  // handle exception caused by native method modifying control word
2255  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2256
2257  switch (exception_code) {
2258  case EXCEPTION_FLT_DENORMAL_OPERAND:
2259  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2260  case EXCEPTION_FLT_INEXACT_RESULT:
2261  case EXCEPTION_FLT_INVALID_OPERATION:
2262  case EXCEPTION_FLT_OVERFLOW:
2263  case EXCEPTION_FLT_STACK_CHECK:
2264  case EXCEPTION_FLT_UNDERFLOW:
2265    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2266    if (fp_control_word != ctx->FloatSave.ControlWord) {
2267      // Restore FPCW and mask out FLT exceptions
2268      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2269      // Mask out pending FLT exceptions
2270      ctx->FloatSave.StatusWord &=  0xffffff00;
2271      return EXCEPTION_CONTINUE_EXECUTION;
2272    }
2273  }
2274
2275  if (prev_uef_handler != NULL) {
2276    // We didn't handle this exception so pass it to the previous
2277    // UnhandledExceptionFilter.
2278    return (prev_uef_handler)(exceptionInfo);
2279  }
2280#else // !_WIN64
2281  // On Windows, the mxcsr control bits are non-volatile across calls
2282  // See also CR 6192333
2283  //
2284  jint MxCsr = INITIAL_MXCSR;
2285  // we can't use StubRoutines::addr_mxcsr_std()
2286  // because in Win64 mxcsr is not saved there
2287  if (MxCsr != ctx->MxCsr) {
2288    ctx->MxCsr = MxCsr;
2289    return EXCEPTION_CONTINUE_EXECUTION;
2290  }
2291#endif // !_WIN64
2292
2293  return EXCEPTION_CONTINUE_SEARCH;
2294}
2295
2296static inline void report_error(Thread* t, DWORD exception_code,
2297                                address addr, void* siginfo, void* context) {
2298  VMError::report_and_die(t, exception_code, addr, siginfo, context);
2299
2300  // If UseOsErrorReporting, this will return here and save the error file
2301  // somewhere where we can find it in the minidump.
2302}
2303
2304bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2305        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2306  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2307  address addr = (address) exceptionRecord->ExceptionInformation[1];
2308  if (Interpreter::contains(pc)) {
2309    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2310    if (!fr->is_first_java_frame()) {
2311      assert(fr->safe_for_sender(thread), "Safety check");
2312      *fr = fr->java_sender();
2313    }
2314  } else {
2315    // More complex handling is needed for compiled code.
2316    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2317    CodeBlob* cb = CodeCache::find_blob(pc);
2318    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2319      // Not sure where the pc points to; fall back to the default
2320      // stack overflow handling.
2321      return false;
2322    } else {
2323      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2324      // in compiled code, the stack banging is performed just after the return pc
2325      // has been pushed on the stack
2326      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2327      if (!fr->is_java_frame()) {
2328        assert(fr->safe_for_sender(thread), "Safety check");
2329        *fr = fr->java_sender();
2330      }
2331    }
2332  }
2333  assert(fr->is_java_frame(), "Safety check");
2334  return true;
2335}
2336
2337//-----------------------------------------------------------------------------
2338LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2339  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2340  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2341#ifdef _M_IA64
2342  // On Itanium, we need the "precise pc", which has the slot number coded
2343  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2344  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2345  // Convert the pc to "Unix format", which has the slot number coded
2346  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2347  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2348  // information is saved in the Unix format.
2349  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2350#else
2351  #ifdef _M_AMD64
2352  address pc = (address) exceptionInfo->ContextRecord->Rip;
2353  #else
2354  address pc = (address) exceptionInfo->ContextRecord->Eip;
2355  #endif
2356#endif
2357  Thread* t = Thread::current_or_null_safe();
2358
2359  // Handle SafeFetch32 and SafeFetchN exceptions.
2360  if (StubRoutines::is_safefetch_fault(pc)) {
2361    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2362  }
2363
2364#ifndef _WIN64
2365  // Execution protection violation - win32 running on AMD64 only
2366  // Handled first to avoid misdiagnosis as a "normal" access violation;
2367  // This is safe to do because we have a new/unique ExceptionInformation
2368  // code for this condition.
2369  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2370    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2371    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2372    address addr = (address) exceptionRecord->ExceptionInformation[1];
2373
2374    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2375      int page_size = os::vm_page_size();
2376
2377      // Make sure the pc and the faulting address are sane.
2378      //
2379      // If an instruction spans a page boundary, and the page containing
2380      // the beginning of the instruction is executable but the following
2381      // page is not, the pc and the faulting address might be slightly
2382      // different - we still want to unguard the 2nd page in this case.
2383      //
2384      // 15 bytes seems to be a (very) safe value for max instruction size.
2385      bool pc_is_near_addr =
2386        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2387      bool instr_spans_page_boundary =
2388        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2389                         (intptr_t) page_size) > 0);
2390
2391      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2392        static volatile address last_addr =
2393          (address) os::non_memory_address_word();
2394
2395        // In conservative mode, don't unguard unless the address is in the VM
2396        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2397            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2398
2399          // Set memory to RWX and retry
2400          address page_start =
2401            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2402          bool res = os::protect_memory((char*) page_start, page_size,
2403                                        os::MEM_PROT_RWX);
2404
2405          if (PrintMiscellaneous && Verbose) {
2406            char buf[256];
2407            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
2408                         "at " INTPTR_FORMAT
2409                         ", unguarding " INTPTR_FORMAT ": %s", addr,
2410                         page_start, (res ? "success" : strerror(errno)));
2411            tty->print_raw_cr(buf);
2412          }
2413
2414          // Set last_addr so if we fault again at the same address, we don't
2415          // end up in an endless loop.
2416          //
2417          // There are two potential complications here.  Two threads trapping
2418          // at the same address at the same time could cause one of the
2419          // threads to think it had already unguarded the page, and abort the VM.  Likely
2420          // very rare.
2421          //
2422          // The other race involves two threads alternately trapping at
2423          // different addresses and failing to unguard the page, resulting in
2424          // an endless loop.  This condition is probably even more unlikely
2425          // than the first.
2426          //
2427          // Although both cases could be avoided by using locks or thread
2428          // local last_addr, these solutions are an unnecessary complication:
2429          // this handler is a best-effort safety net, not a complete solution.
2430          // It is disabled by default and should only be used as a workaround
2431          // in case we missed any no-execute-unsafe VM code.
2432
2433          last_addr = addr;
2434
2435          return EXCEPTION_CONTINUE_EXECUTION;
2436        }
2437      }
2438
2439      // Last unguard failed or not unguarding
2440      tty->print_raw_cr("Execution protection violation");
2441      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2442                   exceptionInfo->ContextRecord);
2443      return EXCEPTION_CONTINUE_SEARCH;
2444    }
2445  }
2446#endif // _WIN64
2447
2448  // Check to see if we caught the safepoint code in the
2449  // process of write protecting the memory serialization page.
2450  // It write enables the page immediately after protecting it
2451  // so just return.
2452  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2453    JavaThread* thread = (JavaThread*) t;
2454    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2455    address addr = (address) exceptionRecord->ExceptionInformation[1];
2456    if (os::is_memory_serialize_page(thread, addr)) {
2457      // Block current thread until the memory serialize page permission restored.
2458      os::block_on_serialize_page_trap();
2459      return EXCEPTION_CONTINUE_EXECUTION;
2460    }
2461  }
2462
2463  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2464      VM_Version::is_cpuinfo_segv_addr(pc)) {
2465    // Verify that the OS saves/restores AVX registers.
2466    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2467  }
2468
2469  if (t != NULL && t->is_Java_thread()) {
2470    JavaThread* thread = (JavaThread*) t;
2471    bool in_java = thread->thread_state() == _thread_in_Java;
2472
2473    // Handle potential stack overflows up front.
2474    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2475#ifdef _M_IA64
2476      // Use guard page for register stack.
2477      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2478      address addr = (address) exceptionRecord->ExceptionInformation[1];
2479      // Check for a register stack overflow on Itanium
2480      if (thread->addr_inside_register_stack_red_zone(addr)) {
2481        // Fatal red zone violation happens if the Java program
2482        // catches a StackOverflow error and does so much processing
2483        // that it runs beyond the unprotected yellow guard zone. As
2484        // a result, we are out of here.
2485        fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2486      } else if(thread->addr_inside_register_stack(addr)) {
2487        // Disable the yellow zone which sets the state that
2488        // we've got a stack overflow problem.
2489        if (thread->stack_yellow_reserved_zone_enabled()) {
2490          thread->disable_stack_yellow_reserved_zone();
2491        }
2492        // Give us some room to process the exception.
2493        thread->disable_register_stack_guard();
2494        // Tracing with +Verbose.
2495        if (Verbose) {
2496          tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2497          tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2498          tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2499          tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2500                        thread->register_stack_base(),
2501                        thread->register_stack_base() + thread->stack_size());
2502        }
2503
2504        // Reguard the permanent register stack red zone just to be sure.
2505        // We saw Windows silently disabling this without telling us.
2506        thread->enable_register_stack_red_zone();
2507
2508        return Handle_Exception(exceptionInfo,
2509                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2510      }
2511#endif
2512      if (thread->stack_guards_enabled()) {
2513        if (in_java) {
2514          frame fr;
2515          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2516          address addr = (address) exceptionRecord->ExceptionInformation[1];
2517          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2518            assert(fr.is_java_frame(), "Must be a Java frame");
2519            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2520          }
2521        }
2522        // Yellow zone violation.  The o/s has unprotected the first yellow
2523        // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2524        // update the enabled status, even if the zone contains only one page.
2525        thread->disable_stack_yellow_reserved_zone();
2526        // If not in java code, return and hope for the best.
2527        return in_java
2528            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2529            :  EXCEPTION_CONTINUE_EXECUTION;
2530      } else {
2531        // Fatal red zone violation.
2532        thread->disable_stack_red_zone();
2533        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2534        report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2535                      exceptionInfo->ContextRecord);
2536        return EXCEPTION_CONTINUE_SEARCH;
2537      }
2538    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2539      // Either stack overflow or null pointer exception.
2540      if (in_java) {
2541        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2542        address addr = (address) exceptionRecord->ExceptionInformation[1];
2543        address stack_end = thread->stack_end();
2544        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2545          // Stack overflow.
2546          assert(!os::uses_stack_guard_pages(),
2547                 "should be caught by red zone code above.");
2548          return Handle_Exception(exceptionInfo,
2549                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2550        }
2551        // Check for safepoint polling and implicit null checks.
2552        // We only expect null pointers in the stubs (vtable);
2553        // the rest are checked explicitly now.
2554        CodeBlob* cb = CodeCache::find_blob(pc);
2555        if (cb != NULL) {
2556          if (os::is_poll_address(addr)) {
2557            address stub = SharedRuntime::get_poll_stub(pc);
2558            return Handle_Exception(exceptionInfo, stub);
2559          }
2560        }
2561        {
2562#ifdef _WIN64
2563          // If it's a legal stack address, map the entire region in.
2564          //
2565          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2566          address addr = (address) exceptionRecord->ExceptionInformation[1];
2567          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
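                // Round the faulting address down to a page boundary and commit
                // from there up to the stack base.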
2568            addr = (address)((uintptr_t)addr &
2569                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2570            os::commit_memory((char *)addr, thread->stack_base() - addr,
2571                              !ExecMem);
2572            return EXCEPTION_CONTINUE_EXECUTION;
2573          } else
2574#endif
2575          {
2576            // Null pointer exception.
2577#ifdef _M_IA64
2578            // Process implicit null checks in compiled code. Note: Implicit null checks
2579            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2580            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2581              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2582              // Handle implicit null check in UEP method entry
2583              if (cb && (cb->is_frame_complete_at(pc) ||
2584                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2585                if (Verbose) {
2586                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2587                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2588                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2589                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2590                                *(bundle_start + 1), *bundle_start);
2591                }
2592                return Handle_Exception(exceptionInfo,
2593                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2594              }
2595            }
2596
2597            // Implicit null checks were processed above.  Hence, we should not reach
2598            // here in the usual case => die!
2599            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2600            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2601                         exceptionInfo->ContextRecord);
2602            return EXCEPTION_CONTINUE_SEARCH;
2603
2604#else // !IA64
2605
2606            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2607              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2608              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2609            }
2610            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2611                         exceptionInfo->ContextRecord);
2612            return EXCEPTION_CONTINUE_SEARCH;
2613#endif
2614          }
2615        }
2616      }
2617
2618#ifdef _WIN64
2619      // Special care for fast JNI field accessors.
2620      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2621      // in and the heap gets shrunk before the field access.
2622      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2623        address addr = JNI_FastGetField::find_slowcase_pc(pc);
2624        if (addr != (address)-1) {
2625          return Handle_Exception(exceptionInfo, addr);
2626        }
2627      }
2628#endif
2629
2630      // Stack overflow or null pointer exception in native code.
2631      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2632                   exceptionInfo->ContextRecord);
2633      return EXCEPTION_CONTINUE_SEARCH;
2634    } // /EXCEPTION_ACCESS_VIOLATION
2635    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2636#if defined _M_IA64
2637    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2638              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2639      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2640
2641      // Compiled method patched to be non-entrant? The following conditions must apply:
2642      // 1. must be first instruction in bundle
2643      // 2. must be a break instruction with appropriate code
2644      if ((((uint64_t) pc & 0x0F) == 0) &&
2645          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2646        return Handle_Exception(exceptionInfo,
2647                                (address)SharedRuntime::get_handle_wrong_method_stub());
2648      }
2649    } // /EXCEPTION_ILLEGAL_INSTRUCTION
2650#endif
2651
2652
2653    if (in_java) {
2654      switch (exception_code) {
2655      case EXCEPTION_INT_DIVIDE_BY_ZERO:
2656        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2657
2658      case EXCEPTION_INT_OVERFLOW:
2659        return Handle_IDiv_Exception(exceptionInfo);
2660
2661      } // switch
2662    }
2663    if (((thread->thread_state() == _thread_in_Java) ||
2664         (thread->thread_state() == _thread_in_native)) &&
2665         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2666      LONG result=Handle_FLT_Exception(exceptionInfo);
2667      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2668    }
2669  }
2670
2671  if (exception_code != EXCEPTION_BREAKPOINT) {
2672    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2673                 exceptionInfo->ContextRecord);
2674  }
2675  return EXCEPTION_CONTINUE_SEARCH;
2676}
2677
2678#ifndef _WIN64
2679// Special care for fast JNI accessors.
2680// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2681// the heap gets shrunk before the field access.
2682// Need to install our own structured exception handler since native code may
2683// install its own.
2684LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2685  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2686  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2687    address pc = (address) exceptionInfo->ContextRecord->Eip;
2688    address addr = JNI_FastGetField::find_slowcase_pc(pc);
2689    if (addr != (address)-1) {
2690      return Handle_Exception(exceptionInfo, addr);
2691    }
2692  }
2693  return EXCEPTION_CONTINUE_SEARCH;
2694}
2695
2696#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2697  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2698                                                     jobject obj,           \
2699                                                     jfieldID fieldID) {    \
2700    __try {                                                                 \
2701      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2702                                                                 obj,       \
2703                                                                 fieldID);  \
2704    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2705                                              _exception_info())) {         \
2706    }                                                                       \
2707    return 0;                                                               \
2708  }
2709
2710DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2711DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2712DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2713DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2714DEFINE_FAST_GETFIELD(jint,     int,    Int)
2715DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2716DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2717DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2718
2719address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2720  switch (type) {
2721  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2722  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2723  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2724  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2725  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2726  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2727  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2728  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2729  default:        ShouldNotReachHere();
2730  }
2731  return (address)-1;
2732}
2733#endif
2734
2735// Virtual Memory
2736
2737int os::vm_page_size() { return os::win32::vm_page_size(); }
2738int os::vm_allocation_granularity() {
2739  return os::win32::vm_allocation_granularity();
2740}
2741
2742// Windows large page support is available on Windows 2003. In order to use
2743// large page memory, the administrator must first assign additional privilege
2744// to the user:
2745//   + select Control Panel -> Administrative Tools -> Local Security Policy
2746//   + select Local Policies -> User Rights Assignment
2747//   + double click "Lock pages in memory", add users and/or groups
2748//   + reboot
2749// Note the above steps are needed for administrator as well, as administrators
2750// by default do not have the privilege to lock pages in memory.
2751//
2752// Note about Windows 2003: although the API supports committing large page
2753// memory on a page-by-page basis and VirtualAlloc() returns success under this
2754  // scenario, I found through experimentation that it only uses large pages if the entire
2755// memory region is reserved and committed in a single VirtualAlloc() call.
2756// This makes Windows large page support more or less like Solaris ISM, in
2757// that the entire heap must be committed upfront. This probably will change
2758// in the future, if so the code below needs to be revisited.
2759
2760#ifndef MEM_LARGE_PAGES
2761  #define MEM_LARGE_PAGES 0x20000000
2762#endif
2763
2764static HANDLE    _hProcess;
2765static HANDLE    _hToken;
2766
2767// Container for NUMA node list info
2768class NUMANodeListHolder {
2769 private:
2770  int *_numa_used_node_list;  // allocated below
2771  int _numa_used_node_count;
2772
2773  void free_node_list() {
2774    if (_numa_used_node_list != NULL) {
2775      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2776    }
2777  }
2778
2779 public:
2780  NUMANodeListHolder() {
2781    _numa_used_node_count = 0;
2782    _numa_used_node_list = NULL;
2783    // do rest of initialization in build routine (after function pointers are set up)
2784  }
2785
2786  ~NUMANodeListHolder() {
2787    free_node_list();
2788  }
2789
2790  bool build() {
2791    DWORD_PTR proc_aff_mask;
2792    DWORD_PTR sys_aff_mask;
2793    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2794    ULONG highest_node_number;
2795    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2796    free_node_list();
2797    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2798    for (unsigned int i = 0; i <= highest_node_number; i++) {
2799      ULONGLONG proc_mask_numa_node;
2800      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
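          // Only remember nodes whose processors intersect this process' affinity mask.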
2801      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2802        _numa_used_node_list[_numa_used_node_count++] = i;
2803      }
2804    }
2805    return (_numa_used_node_count > 1);
2806  }
2807
2808  int get_count() { return _numa_used_node_count; }
2809  int get_node_list_entry(int n) {
2810    // for indexes out of range, returns -1
2811    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2812  }
2813
2814} numa_node_list_holder;
2815
2816
2817
2818static size_t _large_page_size = 0;
2819
2820static bool request_lock_memory_privilege() {
2821  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2822                          os::current_process_id());
2823
2824  LUID luid;
2825  if (_hProcess != NULL &&
2826      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2827      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2828
2829    TOKEN_PRIVILEGES tp;
2830    tp.PrivilegeCount = 1;
2831    tp.Privileges[0].Luid = luid;
2832    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2833
2834    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2835    // privilege. Check GetLastError() too. See MSDN document.
2836    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2837        (GetLastError() == ERROR_SUCCESS)) {
2838      return true;
2839    }
2840  }
2841
2842  return false;
2843}
2844
2845static void cleanup_after_large_page_init() {
2846  if (_hProcess) CloseHandle(_hProcess);
2847  _hProcess = NULL;
2848  if (_hToken) CloseHandle(_hToken);
2849  _hToken = NULL;
2850}
2851
2852static bool numa_interleaving_init() {
2853  bool success = false;
2854  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2855
2856  // print a warning if UseNUMAInterleaving flag is specified on command line
2857  bool warn_on_failure = use_numa_interleaving_specified;
2858#define WARN(msg) if (warn_on_failure) { warning(msg); }
2859
2860  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2861  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2862  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2863
2864  if (numa_node_list_holder.build()) {
2865    if (PrintMiscellaneous && Verbose) {
2866      tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2867      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2868        tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
2869      }
2870      tty->print("\n");
2871    }
2872    success = true;
2873  } else {
2874    WARN("Process does not cover multiple NUMA nodes.");
2875  }
2876  if (!success) {
2877    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2878  }
2879  return success;
2880#undef WARN
2881}
2882
2883  // This routine is used whenever we need to reserve a contiguous VA range
2884  // but must make separate VirtualAlloc calls for each piece of the range.
2885  // Reasons for doing this:
2886  //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it can be set otherwise)
2887  //  * UseNUMAInterleaving requires a separate node for each piece
2888static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2889                                         DWORD prot,
2890                                         bool should_inject_error = false) {
2891  char * p_buf;
2892  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2893  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2894  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2895
2896  // First reserve enough address space in advance, since we want to be
2897  // able to break a single contiguous virtual address range into multiple
2898  // large page commits, but WS2003 does not allow reserving large page space.
2899  // So we just use 4K pages for the reserve; this gives us a legal contiguous
2900  // address space. Then we deallocate that reservation and re-allocate
2901  // using large pages.
2902  const size_t size_of_reserve = bytes + chunk_size;
2903  if (bytes > size_of_reserve) {
2904    // Overflowed.
2905    return NULL;
2906  }
2907  p_buf = (char *) VirtualAlloc(addr,
2908                                size_of_reserve,  // size of Reserve
2909                                MEM_RESERVE,
2910                                PAGE_READWRITE);
2911  // If reservation failed, return NULL
2912  if (p_buf == NULL) return NULL;
2913  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2914  os::release_memory(p_buf, bytes + chunk_size);
2915
2916  // We still need to round up to a page boundary (in case we are using large pages),
2917  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
2918  // instead we handle this in the bytes_to_rq computation below.
2919  p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2920
2921  // now go through and allocate one chunk at a time until all bytes are
2922  // allocated
2923  size_t  bytes_remaining = bytes;
2924  // An overflow of align_size_up() would have been caught above
2925  // in the calculation of size_of_reserve.
2926  char * next_alloc_addr = p_buf;
2927  HANDLE hProc = GetCurrentProcess();
2928
2929#ifdef ASSERT
2930  // Variable for the failure injection
2931  long ran_num = os::random();
2932  size_t fail_after = ran_num % bytes;
2933#endif
2934
2935  int count=0;
2936  while (bytes_remaining) {
2937    // select bytes_to_rq to get to the next chunk_size boundary
2938
2939    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2940    // Note allocate and commit
2941    char * p_new;
2942
2943#ifdef ASSERT
2944    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2945#else
2946    const bool inject_error_now = false;
2947#endif
2948
2949    if (inject_error_now) {
2950      p_new = NULL;
2951    } else {
2952      if (!UseNUMAInterleaving) {
2953        p_new = (char *) VirtualAlloc(next_alloc_addr,
2954                                      bytes_to_rq,
2955                                      flags,
2956                                      prot);
2957      } else {
2958        // get the next node to use from the used_node_list
2959        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2960        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2961        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2962      }
2963    }
2964
2965    if (p_new == NULL) {
2966      // Free any allocated pages
2967      if (next_alloc_addr > p_buf) {
2968        // Some memory was committed so release it.
2969        size_t bytes_to_release = bytes - bytes_remaining;
2970        // NMT has yet to record any individual blocks, so it
2971        // needs to create a dummy 'reserve' record to match
2972        // the release.
2973        MemTracker::record_virtual_memory_reserve((address)p_buf,
2974                                                  bytes_to_release, CALLER_PC);
2975        os::release_memory(p_buf, bytes_to_release);
2976      }
2977#ifdef ASSERT
2978      if (should_inject_error) {
2979        if (TracePageSizes && Verbose) {
2980          tty->print_cr("Reserving pages individually failed.");
2981        }
2982      }
2983#endif
2984      return NULL;
2985    }
2986
2987    bytes_remaining -= bytes_to_rq;
2988    next_alloc_addr += bytes_to_rq;
2989    count++;
2990  }
2991  // Although the memory is allocated individually, it is returned as one.
2992  // NMT records it as one block.
2993  if ((flags & MEM_COMMIT) != 0) {
2994    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
2995  } else {
2996    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
2997  }
2998
2999  // made it this far, success
3000  return p_buf;
3001}
3002
3003
3004
3005void os::large_page_init() {
3006  if (!UseLargePages) return;
3007
3008  // print a warning if any large page related flag is specified on command line
3009  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3010                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3011  bool success = false;
3012
3013#define WARN(msg) if (warn_on_failure) { warning(msg); }
3014  if (request_lock_memory_privilege()) {
3015    size_t s = GetLargePageMinimum();
3016    if (s) {
3017#if defined(IA32) || defined(AMD64)
3018      if (s > 4*M || LargePageSizeInBytes > 4*M) {
3019        WARN("JVM cannot use large pages bigger than 4mb.");
3020      } else {
3021#endif
3022        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3023          _large_page_size = LargePageSizeInBytes;
3024        } else {
3025          _large_page_size = s;
3026        }
3027        success = true;
3028#if defined(IA32) || defined(AMD64)
3029      }
3030#endif
3031    } else {
3032      WARN("Large page is not supported by the processor.");
3033    }
3034  } else {
3035    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3036  }
3037#undef WARN
3038
3039  const size_t default_page_size = (size_t) vm_page_size();
3040  if (success && _large_page_size > default_page_size) {
3041    _page_sizes[0] = _large_page_size;
3042    _page_sizes[1] = default_page_size;
3043    _page_sizes[2] = 0;
3044  }
3045
3046  cleanup_after_large_page_init();
3047  UseLargePages = success;
3048}
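// Worked example of the size selection above (assuming GetLargePageMinimum()
// reports 2M):
//   -XX:LargePageSizeInBytes=4M  -> a multiple of 2M, so _large_page_size = 4M
//   -XX:LargePageSizeInBytes=3M  -> not a multiple of 2M, fall back to 2M
//   flag left at its default (0) -> _large_page_size = 2M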
3049
3050// On win32, one cannot release just a part of reserved memory; it's an
3051// all or nothing deal.  When we split a reservation, we must break the
3052// reservation into two reservations.
3053void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3054                                  bool realloc) {
3055  if (size > 0) {
3056    release_memory(base, size);
3057    if (realloc) {
3058      reserve_memory(split, base);
3059    }
3060    if (size != split) {
3061      reserve_memory(size - split, base + split);
3062    }
3063  }
3064}
3065
3066// Multiple threads can race in this code, but unlike on POSIX-like OSes it is not
3067// possible to unmap small sections of virtual space to get the requested alignment.
3068// Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3069char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3070  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3071         "Alignment must be a multiple of allocation granularity (page size)");
3072  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3073
3074  size_t extra_size = size + alignment;
3075  assert(extra_size >= size, "overflow, size is too large to allow alignment");
3076
3077  char* aligned_base = NULL;
3078
3079  do {
3080    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3081    if (extra_base == NULL) {
3082      return NULL;
3083    }
3084    // Do manual alignment
3085    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3086
3087    os::release_memory(extra_base, extra_size);
3088
3089    aligned_base = os::reserve_memory(size, aligned_base);
3090
3091  } while (aligned_base == NULL);
3092
3093  return aligned_base;
3094}
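// Worked example of the over-reserve-and-retry scheme above (addresses are
// hypothetical): for size = 1M and alignment = 64K, extra_size is 1M + 64K.
// If the first reservation lands at 0x12345000, align_size_up() yields
// 0x12350000; the oversized block is released and exactly 1M is re-reserved at
// 0x12350000. Should another thread grab that range in between, the re-reserve
// returns NULL and the loop simply starts over.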
3095
3096char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3097  assert((size_t)addr % os::vm_allocation_granularity() == 0,
3098         "reserve alignment");
3099  assert(bytes % os::vm_page_size() == 0, "reserve page size");
3100  char* res;
3101  // note that if UseLargePages is on, all the areas that require interleaving
3102  // will go thru reserve_memory_special rather than thru here.
3103  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3104  if (!use_individual) {
3105    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3106  } else {
3107    elapsedTimer reserveTimer;
3108    if (Verbose && PrintMiscellaneous) reserveTimer.start();
3109    // in numa interleaving, we have to allocate pages individually
3110    // (well really chunks of NUMAInterleaveGranularity size)
3111    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3112    if (res == NULL) {
3113      warning("NUMA page allocation failed");
3114    }
3115    if (Verbose && PrintMiscellaneous) {
3116      reserveTimer.stop();
3117      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3118                    reserveTimer.milliseconds(), reserveTimer.ticks());
3119    }
3120  }
3121  assert(res == NULL || addr == NULL || addr == res,
3122         "Unexpected address from reserve.");
3123
3124  return res;
3125}
3126
3127// Reserve memory at an arbitrary address, only if that area is
3128// available (and not reserved for something else).
3129char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3130  // Windows os::reserve_memory() fails if the requested address range is
3131  // not available.
3132  return reserve_memory(bytes, requested_addr);
3133}
3134
3135size_t os::large_page_size() {
3136  return _large_page_size;
3137}
3138
3139bool os::can_commit_large_page_memory() {
3140  // Windows only uses large page memory when the entire region is reserved
3141  // and committed in a single VirtualAlloc() call. This may change in the
3142  // future, but with Windows 2003 it's not possible to commit on demand.
3143  return false;
3144}
3145
3146bool os::can_execute_large_page_memory() {
3147  return true;
3148}
3149
3150char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3151                                 bool exec) {
3152  assert(UseLargePages, "only for large pages");
3153
3154  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3155    return NULL; // Fallback to small pages.
3156  }
3157
3158  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3159  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3160
3161  // with large pages, there are two cases where we need to use Individual Allocation
3162  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3163  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3164  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3165    if (TracePageSizes && Verbose) {
3166      tty->print_cr("Reserving large pages individually.");
3167    }
3168    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3169    if (p_buf == NULL) {
3170      // give an appropriate warning message
3171      if (UseNUMAInterleaving) {
3172        warning("NUMA large page allocation failed, UseLargePages flag ignored");
3173      }
3174      if (UseLargePagesIndividualAllocation) {
3175        warning("Individually allocated large pages failed, "
3176                "use -XX:-UseLargePagesIndividualAllocation to turn off");
3177      }
3178      return NULL;
3179    }
3180
3181    return p_buf;
3182
3183  } else {
3184    if (TracePageSizes && Verbose) {
3185      tty->print_cr("Reserving large pages in a single large chunk.");
3186    }
3187    // normal policy just allocate it all at once
3188    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3189    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3190    if (res != NULL) {
3191      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3192    }
3193
3194    return res;
3195  }
3196}
3197
3198bool os::release_memory_special(char* base, size_t bytes) {
3199  assert(base != NULL, "Sanity check");
3200  return release_memory(base, bytes);
3201}
3202
3203void os::print_statistics() {
3204}
3205
3206static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3207  int err = os::get_last_error();
3208  char buf[256];
3209  size_t buf_len = os::lasterror(buf, sizeof(buf));
3210  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3211          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3212          exec, buf_len != 0 ? buf : "<no_error_string>", err);
3213}
3214
3215bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3216  if (bytes == 0) {
3217    // Don't bother the OS with noops.
3218    return true;
3219  }
3220  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3221  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3222  // Don't attempt to print anything if the OS call fails. We're
3223  // probably low on resources, so the print itself may cause crashes.
3224
3225  // unless we have NUMAInterleaving enabled, the range of a commit
3226  // is always within a reserve covered by a single VirtualAlloc
3227  // in that case we can just do a single commit for the requested size
3228  if (!UseNUMAInterleaving) {
3229    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3230      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3231      return false;
3232    }
3233    if (exec) {
3234      DWORD oldprot;
3235      // Windows doc says to use VirtualProtect to get execute permissions
3236      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3237        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3238        return false;
3239      }
3240    }
3241    return true;
3242  } else {
3243
3244    // when NUMAInterleaving is enabled, the commit might cover a range that
3245    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3246    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3247    // returns represents the number of bytes that can be committed in one step.
3248    size_t bytes_remaining = bytes;
3249    char * next_alloc_addr = addr;
3250    while (bytes_remaining > 0) {
3251      MEMORY_BASIC_INFORMATION alloc_info;
3252      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3253      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3254      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3255                       PAGE_READWRITE) == NULL) {
3256        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3257                                            exec);)
3258        return false;
3259      }
3260      if (exec) {
3261        DWORD oldprot;
3262        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3263                            PAGE_EXECUTE_READWRITE, &oldprot)) {
3264          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3265                                              exec);)
3266          return false;
3267        }
3268      }
3269      bytes_remaining -= bytes_to_rq;
3270      next_alloc_addr += bytes_to_rq;
3271    }
3272  }
3273  // if we made it this far, return true
3274  return true;
3275}
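// The chunked commit above relies on VirtualQuery() reporting, for any address,
// how much memory can be operated on in one step. A minimal sketch of that
// query ('addr' is a hypothetical address inside a reservation):
//
//   MEMORY_BASIC_INFORMATION info;
//   if (VirtualQuery(addr, &info, sizeof(info)) != 0) {
//     // info.AllocationBase - start of the VirtualAlloc reservation holding addr
//     // info.RegionSize     - bytes from addr with identical attributes, i.e.
//     //                       the most that can be committed in a single call
//   }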
3276
3277bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3278                          bool exec) {
3279  // alignment_hint is ignored on this OS
3280  return pd_commit_memory(addr, size, exec);
3281}
3282
3283void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3284                                  const char* mesg) {
3285  assert(mesg != NULL, "mesg must be specified");
3286  if (!pd_commit_memory(addr, size, exec)) {
3287    warn_fail_commit_memory(addr, size, exec);
3288    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3289  }
3290}
3291
3292void os::pd_commit_memory_or_exit(char* addr, size_t size,
3293                                  size_t alignment_hint, bool exec,
3294                                  const char* mesg) {
3295  // alignment_hint is ignored on this OS
3296  pd_commit_memory_or_exit(addr, size, exec, mesg);
3297}
3298
3299bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3300  if (bytes == 0) {
3301    // Don't bother the OS with noops.
3302    return true;
3303  }
3304  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3305  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3306  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3307}
3308
3309bool os::pd_release_memory(char* addr, size_t bytes) {
3310  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3311}
3312
3313bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3314  return os::commit_memory(addr, size, !ExecMem);
3315}
3316
3317bool os::remove_stack_guard_pages(char* addr, size_t size) {
3318  return os::uncommit_memory(addr, size);
3319}
3320
3321static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3322  uint count = 0;
3323  bool ret = false;
3324  size_t bytes_remaining = bytes;
3325  char * next_protect_addr = addr;
3326
3327  // Use VirtualQuery() to get the chunk size.
3328  while (bytes_remaining) {
3329    MEMORY_BASIC_INFORMATION alloc_info;
3330    if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3331      return false;
3332    }
3333
3334    size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3335    // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3336    // but we don't distinguish here as both cases are protected by same API.
3337    ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3338    warning("Failed protecting pages individually for chunk #%u", count);
3339    if (!ret) {
3340      return false;
3341    }
3342
3343    bytes_remaining -= bytes_to_protect;
3344    next_protect_addr += bytes_to_protect;
3345    count++;
3346  }
3347  return ret;
3348}
3349
3350// Set protections specified
3351bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3352                        bool is_committed) {
3353  unsigned int p = 0;
3354  switch (prot) {
3355  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3356  case MEM_PROT_READ: p = PAGE_READONLY; break;
3357  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3358  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3359  default:
3360    ShouldNotReachHere();
3361  }
3362
3363  DWORD old_status;
3364
3365  // Strangely enough, on Win32 one can change protection only for committed
3366  // memory; not a big deal anyway, as the sizes involved are at most 64K.
3367  if (!is_committed) {
3368    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3369                          "cannot commit protection page");
3370  }
3371  // One cannot use os::guard_memory() here, as on Win32 guard pages
3372  // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3373  //
3374  // Pages in the region become guard pages. Any attempt to access a guard page
3375  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3376  // the guard page status. Guard pages thus act as a one-time access alarm.
3377  bool ret;
3378  if (UseNUMAInterleaving) {
3379    // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3380    // so we must protect the chunks individually.
3381    ret = protect_pages_individually(addr, bytes, p, &old_status);
3382  } else {
3383    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3384  }
3385#ifdef ASSERT
3386  if (!ret) {
3387    int err = os::get_last_error();
3388    char buf[256];
3389    size_t buf_len = os::lasterror(buf, sizeof(buf));
3390    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3391          ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3392          buf_len != 0 ? buf : "<no_error_string>", err);
3393  }
3394#endif
3395  return ret;
3396}
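// Hypothetical usage sketch (names are illustrative, not taken from the VM):
// reserve one page and read-protect it, letting protect_memory() do the commit
// because the page has not been committed yet:
//
//   char* page = os::reserve_memory(os::vm_page_size());
//   os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_READ,
//                      false /* not yet committed */);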
3397
3398bool os::guard_memory(char* addr, size_t bytes) {
3399  DWORD old_status;
3400  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3401}
3402
3403bool os::unguard_memory(char* addr, size_t bytes) {
3404  DWORD old_status;
3405  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3406}
3407
3408void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3409void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3410void os::numa_make_global(char *addr, size_t bytes)    { }
3411void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3412bool os::numa_topology_changed()                       { return false; }
3413size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3414int os::numa_get_group_id()                            { return 0; }
3415size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3416  if (numa_node_list_holder.get_count() == 0 && size > 0) {
3417    // Provide an answer for UMA systems
3418    ids[0] = 0;
3419    return 1;
3420  } else {
3421    // check for size bigger than actual groups_num
3422    size = MIN2(size, numa_get_groups_num());
3423    for (int i = 0; i < (int)size; i++) {
3424      ids[i] = numa_node_list_holder.get_node_list_entry(i);
3425    }
3426    return size;
3427  }
3428}
3429
3430bool os::get_page_info(char *start, page_info* info) {
3431  return false;
3432}
3433
3434char *os::scan_pages(char *start, char* end, page_info* page_expected,
3435                     page_info* page_found) {
3436  return end;
3437}
3438
3439char* os::non_memory_address_word() {
3440  // Must never look like an address returned by reserve_memory,
3441  // even in its subfields (as defined by the CPU immediate fields,
3442  // if the CPU splits constants across multiple instructions).
3443  return (char*)-1;
3444}
3445
3446#define MAX_ERROR_COUNT 100
3447#define SYS_THREAD_ERROR 0xffffffffUL
3448
3449void os::pd_start_thread(Thread* thread) {
3450  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3451  // Returns previous suspend state:
3452  // 0:  Thread was not suspended
3453  // 1:  Thread is running now
3454  // >1: Thread is still suspended.
3455  assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3456}
3457
3458class HighResolutionInterval : public CHeapObj<mtThread> {
3459  // The default timer resolution seems to be 10 milliseconds.
3460  // (Where is this written down?)
3461  // If someone wants to sleep for only a fraction of the default,
3462  // then we set the timer resolution down to 1 millisecond for
3463  // the duration of their interval.
3464  // We carefully set the resolution back, since otherwise we
3465  // seem to incur an overhead (3%?) that we don't need.
3466  // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
3467  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3468  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3469  // timeBeginPeriod() if the relative error exceeded some threshold.
3470  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3471  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3472  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3473  // resolution timers running.
3474 private:
3475  jlong resolution;
3476 public:
3477  HighResolutionInterval(jlong ms) {
3478    resolution = ms % 10L;
3479    if (resolution != 0) {
3480      MMRESULT result = timeBeginPeriod(1L);
3481    }
3482  }
3483  ~HighResolutionInterval() {
3484    if (resolution != 0) {
3485      MMRESULT result = timeEndPeriod(1L);
3486    }
3487    resolution = 0L;
3488  }
3489};
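// Worked examples of the policy implemented above:
//   ms = 3   -> 3 % 10 != 0   -> timeBeginPeriod(1) for the duration of the sleep
//   ms = 500 -> 500 % 10 == 0 -> the default resolution suffices, no call made
//   ms = 503 -> 503 % 10 != 0 -> timeBeginPeriod(1), even though the relative
//                                error at the default resolution would be small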
3490
3491int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3492  jlong limit = (jlong) MAXDWORD;
3493
3494  while (ms > limit) {
3495    int res;
3496    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3497      return res;
3498    }
3499    ms -= limit;
3500  }
3501
3502  assert(thread == Thread::current(), "thread consistency check");
3503  OSThread* osthread = thread->osthread();
3504  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3505  int result;
3506  if (interruptable) {
3507    assert(thread->is_Java_thread(), "must be java thread");
3508    JavaThread *jt = (JavaThread *) thread;
3509    ThreadBlockInVM tbivm(jt);
3510
3511    jt->set_suspend_equivalent();
3512    // cleared by handle_special_suspend_equivalent_condition() or
3513    // java_suspend_self() via check_and_wait_while_suspended()
3514
3515    HANDLE events[1];
3516    events[0] = osthread->interrupt_event();
3517    HighResolutionInterval *phri=NULL;
3518    if (!ForceTimeHighResolution) {
3519      phri = new HighResolutionInterval(ms);
3520    }
3521    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3522      result = OS_TIMEOUT;
3523    } else {
3524      ResetEvent(osthread->interrupt_event());
3525      osthread->set_interrupted(false);
3526      result = OS_INTRPT;
3527    }
3528    delete phri; //if it is NULL, harmless
3529
3530    // were we externally suspended while we were waiting?
3531    jt->check_and_wait_while_suspended();
3532  } else {
3533    assert(!thread->is_Java_thread(), "must not be java thread");
3534    Sleep((long) ms);
3535    result = OS_TIMEOUT;
3536  }
3537  return result;
3538}
3539
3540// Short sleep, direct OS call.
3541//
3542// ms = 0, means allow others (if any) to run.
3543//
3544void os::naked_short_sleep(jlong ms) {
3545  assert(ms < 1000, "Un-interruptible sleep; short time use only");
3546  Sleep(ms);
3547}
3548
3549// Sleep forever; naked call to OS-specific sleep; use with CAUTION
3550void os::infinite_sleep() {
3551  while (true) {    // sleep forever ...
3552    Sleep(100000);  // ... 100 seconds at a time
3553  }
3554}
3555
3556typedef BOOL (WINAPI * STTSignature)(void);
3557
3558void os::naked_yield() {
3559  // Consider passing back the return value from SwitchToThread().
3560  SwitchToThread();
3561}
3562
3563// Win32 only gives you access to seven real priorities at a time,
3564// so we compress Java's ten down to seven.  It would be better
3565// if we dynamically adjusted relative priorities.
3566
3567int os::java_to_os_priority[CriticalPriority + 1] = {
3568  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3569  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3570  THREAD_PRIORITY_LOWEST,                       // 2
3571  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3572  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3573  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3574  THREAD_PRIORITY_NORMAL,                       // 6
3575  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3576  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3577  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3578  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3579  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3580};
3581
3582int prio_policy1[CriticalPriority + 1] = {
3583  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3584  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3585  THREAD_PRIORITY_LOWEST,                       // 2
3586  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3587  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3588  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3589  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3590  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3591  THREAD_PRIORITY_HIGHEST,                      // 8
3592  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3593  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3594  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3595};
3596
3597static int prio_init() {
3598  // If ThreadPriorityPolicy is 1, switch tables
3599  if (ThreadPriorityPolicy == 1) {
3600    int i;
3601    for (i = 0; i < CriticalPriority + 1; i++) {
3602      os::java_to_os_priority[i] = prio_policy1[i];
3603    }
3604  }
3605  if (UseCriticalJavaThreadPriority) {
3606    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3607  }
3608  return 0;
3609}
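// Worked example: with the default table, Java priority 6 maps to
// THREAD_PRIORITY_NORMAL. With -XX:ThreadPriorityPolicy=1 the prio_policy1
// table is installed, so the same Java priority 6 maps to
// THREAD_PRIORITY_ABOVE_NORMAL, and adding -XX:+UseCriticalJavaThreadPriority
// makes MaxPriority (10) reuse the CriticalPriority (11) entry,
// i.e. THREAD_PRIORITY_TIME_CRITICAL.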
3610
3611OSReturn os::set_native_priority(Thread* thread, int priority) {
3612  if (!UseThreadPriorities) return OS_OK;
3613  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3614  return ret ? OS_OK : OS_ERR;
3615}
3616
3617OSReturn os::get_native_priority(const Thread* const thread,
3618                                 int* priority_ptr) {
3619  if (!UseThreadPriorities) {
3620    *priority_ptr = java_to_os_priority[NormPriority];
3621    return OS_OK;
3622  }
3623  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3624  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3625    assert(false, "GetThreadPriority failed");
3626    return OS_ERR;
3627  }
3628  *priority_ptr = os_prio;
3629  return OS_OK;
3630}
3631
3632
3633// Hint to the underlying OS that a task switch would not be good.
3634// Void return because it's a hint and can fail.
3635void os::hint_no_preempt() {}
3636
3637void os::interrupt(Thread* thread) {
3638  assert(!thread->is_Java_thread() || Thread::current() == thread ||
3639         Threads_lock->owned_by_self(),
3640         "possibility of dangling Thread pointer");
3641
3642  OSThread* osthread = thread->osthread();
3643  osthread->set_interrupted(true);
3644  // More than one thread can get here with the same value of osthread,
3645  // resulting in multiple notifications.  We do, however, want the store
3646  // to interrupted() to be visible to other threads before we post
3647  // the interrupt event.
3648  OrderAccess::release();
3649  SetEvent(osthread->interrupt_event());
3650  // For JSR166:  unpark after setting status
3651  if (thread->is_Java_thread()) {
3652    ((JavaThread*)thread)->parker()->unpark();
3653  }
3654
3655  ParkEvent * ev = thread->_ParkEvent;
3656  if (ev != NULL) ev->unpark();
3657}
3658
3659
3660bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3661  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3662         "possibility of dangling Thread pointer");
3663
3664  OSThread* osthread = thread->osthread();
3665  // There is no synchronization between the setting of the interrupt
3666  // and it being cleared here. It is critical - see 6535709 - that
3667  // we only clear the interrupt state, and reset the interrupt event,
3668  // if we are going to report that we were indeed interrupted - else
3669  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3670  // depending on the timing. By checking thread interrupt event to see
3671  // if the thread gets real interrupt thus prevent spurious wakeup.
3672  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3673  if (interrupted && clear_interrupted) {
3674    osthread->set_interrupted(false);
3675    ResetEvent(osthread->interrupt_event());
3676  } // Otherwise leave the interrupted state alone
3677
3678  return interrupted;
3679}
3680
3681// Gets a pc (hint) for a running thread. Currently used only for profiling.
3682ExtendedPC os::get_thread_pc(Thread* thread) {
3683  CONTEXT context;
3684  context.ContextFlags = CONTEXT_CONTROL;
3685  HANDLE handle = thread->osthread()->thread_handle();
3686#ifdef _M_IA64
3687  assert(0, "Fix get_thread_pc");
3688  return ExtendedPC(NULL);
3689#else
3690  if (GetThreadContext(handle, &context)) {
3691#ifdef _M_AMD64
3692    return ExtendedPC((address) context.Rip);
3693#else
3694    return ExtendedPC((address) context.Eip);
3695#endif
3696  } else {
3697    return ExtendedPC(NULL);
3698  }
3699#endif
3700}
3701
3702// GetCurrentThreadId() returns DWORD
3703intx os::current_thread_id()  { return GetCurrentThreadId(); }
3704
3705static int _initial_pid = 0;
3706
3707int os::current_process_id() {
3708  return (_initial_pid ? _initial_pid : _getpid());
3709}
3710
3711int    os::win32::_vm_page_size              = 0;
3712int    os::win32::_vm_allocation_granularity = 0;
3713int    os::win32::_processor_type            = 0;
3714// Processor level is not available on non-NT systems, use vm_version instead
3715int    os::win32::_processor_level           = 0;
3716julong os::win32::_physical_memory           = 0;
3717size_t os::win32::_default_stack_size        = 0;
3718
3719intx          os::win32::_os_thread_limit    = 0;
3720volatile intx os::win32::_os_thread_count    = 0;
3721
3722bool   os::win32::_is_windows_server         = false;
3723
3724// 6573254
3725// Currently, the bug is observed across all the supported Windows releases,
3726// including the latest one (as of this writing - Windows Server 2012 R2)
3727bool   os::win32::_has_exit_bug              = true;
3728
3729void os::win32::initialize_system_info() {
3730  SYSTEM_INFO si;
3731  GetSystemInfo(&si);
3732  _vm_page_size    = si.dwPageSize;
3733  _vm_allocation_granularity = si.dwAllocationGranularity;
3734  _processor_type  = si.dwProcessorType;
3735  _processor_level = si.wProcessorLevel;
3736  set_processor_count(si.dwNumberOfProcessors);
3737
3738  MEMORYSTATUSEX ms;
3739  ms.dwLength = sizeof(ms);
3740
3741  // MEMORYSTATUSEX also returns ullAvailPhys (free physical memory bytes),
3742  // ullTotalVirtual, ullAvailVirtual and dwMemoryLoad (% of memory in use)
3743  GlobalMemoryStatusEx(&ms);
3744  _physical_memory = ms.ullTotalPhys;
3745
3746  OSVERSIONINFOEX oi;
3747  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3748  GetVersionEx((OSVERSIONINFO*)&oi);
3749  switch (oi.dwPlatformId) {
3750  case VER_PLATFORM_WIN32_NT:
3751    {
3752      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3753      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3754          oi.wProductType == VER_NT_SERVER) {
3755        _is_windows_server = true;
3756      }
3757    }
3758    break;
3759  default: fatal("Unknown platform");
3760  }
3761
3762  _default_stack_size = os::current_stack_size();
3763  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3764  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3765         "stack size not a multiple of page size");
3766
3767  initialize_performance_counter();
3768}
3769
3770
3771HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3772                                      int ebuflen) {
3773  char path[MAX_PATH];
3774  DWORD size;
3775  DWORD pathLen = (DWORD)sizeof(path);
3776  HINSTANCE result = NULL;
3777
3778  // only allow library name without path component
3779  assert(strchr(name, '\\') == NULL, "path not allowed");
3780  assert(strchr(name, ':') == NULL, "path not allowed");
3781  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3782    jio_snprintf(ebuf, ebuflen,
3783                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3784    return NULL;
3785  }
3786
3787  // search system directory
3788  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3789    if (size >= pathLen) {
3790      return NULL; // truncated
3791    }
3792    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3793      return NULL; // truncated
3794    }
3795    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3796      return result;
3797    }
3798  }
3799
3800  // try Windows directory
3801  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3802    if (size >= pathLen) {
3803      return NULL; // truncated
3804    }
3805    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3806      return NULL; // truncated
3807    }
3808    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3809      return result;
3810    }
3811  }
3812
3813  jio_snprintf(ebuf, ebuflen,
3814               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3815  return NULL;
3816}
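// Hypothetical usage sketch (the DLL and symbol names are examples only):
// load a library strictly from the system/Windows directories, never from the
// current directory, and report the failure text on error:
//
//   char ebuf[1024];
//   HINSTANCE h = os::win32::load_Windows_dll("psapi.dll", ebuf, sizeof(ebuf));
//   if (h != NULL) {
//     FARPROC fn = GetProcAddress(h, "GetProcessMemoryInfo");
//     // ... use fn ...
//   } else {
//     warning("%s", ebuf);
//   }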
3817
3818#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3819#define EXIT_TIMEOUT 300000 /* 5 minutes */
3820
3821static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3822  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3823  return TRUE;
3824}
3825
3826int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3827  // Basic approach:
3828  //  - Each exiting thread registers its intent to exit and then does so.
3829  //  - A thread trying to terminate the process must wait for all
3830  //    threads currently exiting to complete their exit.
3831
3832  if (os::win32::has_exit_bug()) {
3833    // The array holds handles of the threads that have started exiting by calling
3834    // _endthreadex().
3835    // Should be large enough to avoid blocking the exiting thread due to lack of
3836    // a free slot.
3837    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3838    static int handle_count = 0;
3839
3840    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3841    static CRITICAL_SECTION crit_sect;
3842    static volatile jint process_exiting = 0;
3843    int i, j;
3844    DWORD res;
3845    HANDLE hproc, hthr;
3846
3847    // The first thread that reached this point, initializes the critical section.
3848    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3849      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3850    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3851      if (what != EPT_THREAD) {
3852        // Atomically set process_exiting before the critical section
3853        // to increase the visibility between racing threads.
3854        Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
3855      }
3856      EnterCriticalSection(&crit_sect);
3857
3858      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3859        // Remove from the array those handles of the threads that have completed exiting.
3860        for (i = 0, j = 0; i < handle_count; ++i) {
3861          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3862          if (res == WAIT_TIMEOUT) {
3863            handles[j++] = handles[i];
3864          } else {
3865            if (res == WAIT_FAILED) {
3866              warning("WaitForSingleObject failed (%u) in %s: %d\n",
3867                      GetLastError(), __FILE__, __LINE__);
3868            }
3869            // Don't keep the handle, if we failed waiting for it.
3870            CloseHandle(handles[i]);
3871          }
3872        }
3873
3874        // If there's no free slot in the array of the kept handles, we'll have to
3875        // wait until at least one thread completes exiting.
3876        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3877          // Raise the priority of the oldest exiting thread to increase its chances
3878          // to complete sooner.
3879          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3880          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3881          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3882            i = (res - WAIT_OBJECT_0);
3883            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3884            for (; i < handle_count; ++i) {
3885              handles[i] = handles[i + 1];
3886            }
3887          } else {
3888            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3889                    (res == WAIT_FAILED ? "failed" : "timed out"),
3890                    GetLastError(), __FILE__, __LINE__);
3891            // Don't keep handles, if we failed waiting for them.
3892            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3893              CloseHandle(handles[i]);
3894            }
3895            handle_count = 0;
3896          }
3897        }
3898
3899        // Store a duplicate of the current thread handle in the array of handles.
3900        hproc = GetCurrentProcess();
3901        hthr = GetCurrentThread();
3902        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3903                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
3904          warning("DuplicateHandle failed (%u) in %s: %d\n",
3905                  GetLastError(), __FILE__, __LINE__);
3906        } else {
3907          ++handle_count;
3908        }
3909
3910        // The current exiting thread has stored its handle in the array, and now
3911        // should leave the critical section before calling _endthreadex().
3912
3913      } else if (what != EPT_THREAD && handle_count > 0) {
3914        jlong start_time, finish_time, timeout_left;
3915        // Before ending the process, make sure all the threads that had called
3916        // _endthreadex() completed.
3917
3918        // Set the priority level of the current thread to the same value as
3919        // the priority level of exiting threads.
3920        // This is to ensure it will be given a fair chance to execute if
3921        // the timeout expires.
3922        hthr = GetCurrentThread();
3923        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3924        start_time = os::javaTimeNanos();
3925        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3926        for (i = 0; ; ) {
3927          int portion_count = handle_count - i;
3928          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3929            portion_count = MAXIMUM_WAIT_OBJECTS;
3930          }
3931          for (j = 0; j < portion_count; ++j) {
3932            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3933          }
3934          timeout_left = (finish_time - start_time) / 1000000L;
3935          if (timeout_left < 0) {
3936            timeout_left = 0;
3937          }
3938          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3939          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3940            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3941                    (res == WAIT_FAILED ? "failed" : "timed out"),
3942                    GetLastError(), __FILE__, __LINE__);
3943            // Reset portion_count so we close the remaining
3944            // handles due to this error.
3945            portion_count = handle_count - i;
3946          }
3947          for (j = 0; j < portion_count; ++j) {
3948            CloseHandle(handles[i + j]);
3949          }
3950          if ((i += portion_count) >= handle_count) {
3951            break;
3952          }
3953          start_time = os::javaTimeNanos();
3954        }
3955        handle_count = 0;
3956      }
3957
3958      LeaveCriticalSection(&crit_sect);
3959    }
3960
3961    if (OrderAccess::load_acquire(&process_exiting) != 0 &&
3962        process_exiting != (jint)GetCurrentThreadId()) {
3963      // Some other thread is about to call exit(), so we
3964      // don't let the current thread proceed to exit() or _endthreadex()
3965      while (true) {
3966        SuspendThread(GetCurrentThread());
3967        // Avoid busy-wait loop, if SuspendThread() failed.
3968        Sleep(EXIT_TIMEOUT);
3969      }
3970    }
3971  }
3972
3973  // We are here if either
3974  // - there's no 'race at exit' bug on this OS release;
3975  // - initialization of the critical section failed (unlikely);
3976  // - the current thread has stored its handle and left the critical section;
3977  // - the process-exiting thread has raised the flag and left the critical section.
3978  if (what == EPT_THREAD) {
3979    _endthreadex((unsigned)exit_code);
3980  } else if (what == EPT_PROCESS) {
3981    ::exit(exit_code);
3982  } else {
3983    _exit(exit_code);
3984  }
3985
3986  // Should not reach here
3987  return exit_code;
3988}
3989
3990#undef EXIT_TIMEOUT
3991
3992void os::win32::setmode_streams() {
3993  _setmode(_fileno(stdin), _O_BINARY);
3994  _setmode(_fileno(stdout), _O_BINARY);
3995  _setmode(_fileno(stderr), _O_BINARY);
3996}
3997
3998
3999bool os::is_debugger_attached() {
4000  return IsDebuggerPresent() ? true : false;
4001}
4002
4003
4004void os::wait_for_keypress_at_exit(void) {
4005  if (PauseAtExit) {
4006    fprintf(stderr, "Press any key to continue...\n");
4007    fgetc(stdin);
4008  }
4009}
4010
4011
4012bool os::message_box(const char* title, const char* message) {
4013  int result = MessageBox(NULL, message, title,
4014                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4015  return result == IDYES;
4016}
4017
4018#ifndef PRODUCT
4019#ifndef _WIN64
4020// Helpers to check whether NX protection is enabled
4021int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4022  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4023      pex->ExceptionRecord->NumberParameters > 0 &&
4024      pex->ExceptionRecord->ExceptionInformation[0] ==
4025      EXCEPTION_INFO_EXEC_VIOLATION) {
4026    return EXCEPTION_EXECUTE_HANDLER;
4027  }
4028  return EXCEPTION_CONTINUE_SEARCH;
4029}
4030
4031void nx_check_protection() {
4032  // If NX is enabled we'll get an exception calling into code on the stack
4033  char code[] = { (char)0xC3 }; // ret
4034  void *code_ptr = (void *)code;
4035  __try {
4036    __asm call code_ptr
4037  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4038    tty->print_raw_cr("NX protection detected.");
4039  }
4040}
4041#endif // _WIN64
4042#endif // PRODUCT
4043
4044// This is called _before_ the global arguments have been parsed
4045void os::init(void) {
4046  _initial_pid = _getpid();
4047
4048  init_random(1234567);
4049
4050  win32::initialize_system_info();
4051  win32::setmode_streams();
4052  init_page_sizes((size_t) win32::vm_page_size());
4053
4054  // This may be overridden later when argument processing is done.
4055  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4056
4057  // Initialize main_process and main_thread
4058  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4059  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4060                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4061    fatal("DuplicateHandle failed\n");
4062  }
4063  main_thread_id = (int) GetCurrentThreadId();
4064
4065  // initialize fast thread access - only used for 32-bit
4066  win32::initialize_thread_ptr_offset();
4067}
4068
4069// To install functions for atexit processing
4070extern "C" {
4071  static void perfMemory_exit_helper() {
4072    perfMemory_exit();
4073  }
4074}
4075
4076static jint initSock();
4077
4078// this is called _after_ the global arguments have been parsed
4079jint os::init_2(void) {
4080  // Allocate a single page and mark it as readable for safepoint polling
4081  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4082  guarantee(polling_page != NULL, "Reserve Failed for polling page");
4083
4084  address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4085  guarantee(return_page != NULL, "Commit Failed for polling page");
4086
4087  os::set_polling_page(polling_page);
4088
4089#ifndef PRODUCT
4090  if (Verbose && PrintMiscellaneous) {
4091    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
4092               (intptr_t)polling_page);
4093  }
4094#endif
4095
4096  if (!UseMembar) {
4097    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4098    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4099
4100    return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4101    guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4102
4103    os::set_memory_serialize_page(mem_serialize_page);
4104
4105#ifndef PRODUCT
4106    if (Verbose && PrintMiscellaneous) {
4107      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n",
4108                 (intptr_t)mem_serialize_page);
4109    }
4110#endif
4111  }
4112
4113  // Setup Windows Exceptions
4114
4115  // for debugging float code generation bugs
4116  if (ForceFloatExceptions) {
4117#ifndef  _WIN64
4118    static long fp_control_word = 0;
4119    __asm { fstcw fp_control_word }
4120    // see Intel PPro Manual, Vol. 2, p 7-16
4121    const long precision = 0x20;
4122    const long underflow = 0x10;
4123    const long overflow  = 0x08;
4124    const long zero_div  = 0x04;
4125    const long denorm    = 0x02;
4126    const long invalid   = 0x01;
4127    fp_control_word |= invalid;
4128    __asm { fldcw fp_control_word }
4129#endif
4130  }
4131
4132  // If stack_commit_size is 0, windows will reserve the default size,
4133  // but only commit a small portion of it.
4134  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4135  size_t default_reserve_size = os::win32::default_stack_size();
4136  size_t actual_reserve_size = stack_commit_size;
4137  if (stack_commit_size < default_reserve_size) {
4138    // If stack_commit_size == 0, we want this too
4139    actual_reserve_size = default_reserve_size;
4140  }
4141
4142  // Check minimum allowable stack size for thread creation and to initialize
4143  // the java system classes, including StackOverflowError - depends on page
4144  // size.  Add a page for compiler2 recursion in main thread.
4145  // Add in 2*BytesPerWord times page size to account for VM stack during
4146  // class initialization depending on 32 or 64 bit VM.
4147  size_t min_stack_allowed =
4148            (size_t)(JavaThread::stack_yellow_zone_size() + JavaThread::stack_red_zone_size() +
4149                     JavaThread::stack_shadow_zone_size() +
4150                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size());
4151  if (actual_reserve_size < min_stack_allowed) {
4152    tty->print_cr("\nThe stack size specified is too small; "
4153                  "specify at least " SIZE_FORMAT "k",
4154                  min_stack_allowed / K);
4155    return JNI_ERR;
4156  }
4157
4158  JavaThread::set_stack_size_at_create(stack_commit_size);
4159
4160  // Calculate theoretical max. size of Threads to guard against artificial
4161  // out-of-memory situations, where all available address-space has been
4162  // reserved by thread stacks.
4163  assert(actual_reserve_size != 0, "Must have a stack");
4164
4165  // Calculate the thread limit when we should start doing Virtual Memory
4166  // banging. Currently when the threads will have used all but 200Mb of space.
4167  //
4168  // TODO: consider performing a similar calculation for commit size instead
4169  // as reserve size, since on a 64-bit platform we'll run into that more
4170  // often than running out of virtual memory space.  We can use the
4171  // lower value of the two calculations as the os_thread_limit.
4172  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4173  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4174
4175  // at exit methods are called in the reverse order of their registration.
4176  // there is no limit to the number of functions registered. atexit does
4177  // not set errno.
4178
4179  if (PerfAllowAtExitRegistration) {
4180    // only register atexit functions if PerfAllowAtExitRegistration is set.
4181    // atexit functions can be delayed until process exit time, which
4182    // can be problematic for embedded VM situations. Embedded VMs should
4183    // call DestroyJavaVM() to assure that VM resources are released.
4184
4185    // note: perfMemory_exit_helper atexit function may be removed in
4186    // the future if the appropriate cleanup code can be added to the
4187    // VM_Exit VMOperation's doit method.
4188    if (atexit(perfMemory_exit_helper) != 0) {
4189      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4190    }
4191  }
4192
4193#ifndef _WIN64
4194  // Print something if NX is enabled (win32 on AMD64)
4195  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4196#endif
4197
4198  // initialize thread priority policy
4199  prio_init();
4200
4201  if (UseNUMA && !ForceNUMA) {
4202    UseNUMA = false; // We don't fully support this yet
4203  }
4204
4205  if (UseNUMAInterleaving) {
4206    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4207    bool success = numa_interleaving_init();
4208    if (!success) UseNUMAInterleaving = false;
4209  }
4210
4211  if (initSock() != JNI_OK) {
4212    return JNI_ERR;
4213  }
4214
4215  return JNI_OK;
4216}
4217
4218// Mark the polling page as unreadable
4219void os::make_polling_page_unreadable(void) {
4220  DWORD old_status;
4221  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4222                      PAGE_NOACCESS, &old_status)) {
4223    fatal("Could not disable polling page");
4224  }
4225}
4226
4227// Mark the polling page as readable
4228void os::make_polling_page_readable(void) {
4229  DWORD old_status;
4230  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4231                      PAGE_READONLY, &old_status)) {
4232    fatal("Could not enable polling page");
4233  }
4234}
4235
4236
4237int os::stat(const char *path, struct stat *sbuf) {
4238  char pathbuf[MAX_PATH];
4239  if (strlen(path) > MAX_PATH - 1) {
4240    errno = ENAMETOOLONG;
4241    return -1;
4242  }
4243  os::native_path(strcpy(pathbuf, path));
4244  int ret = ::stat(pathbuf, sbuf);
4245  if (sbuf != NULL && UseUTCFileTimestamp) {
4246    // Fix for 6539723.  st_mtime returned from stat() is dependent on
4247    // the system timezone and so can return different values for the
4248    // same file if/when daylight savings time changes.  This adjustment
4249    // makes sure the same timestamp is returned regardless of the TZ.
4250    //
4251    // See:
4252    // http://msdn.microsoft.com/library/
4253    //   default.asp?url=/library/en-us/sysinfo/base/
4254    //   time_zone_information_str.asp
4255    // and
4256    // http://msdn.microsoft.com/library/default.asp?url=
4257    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4258    //
4259    // NOTE: there is an insidious bug here:  If the timezone is changed
4260    // after the call to stat() but before 'GetTimeZoneInformation()', then
4261    // the adjustment we do here will be wrong and we'll return the wrong
4262    // value (which will likely end up creating an invalid class data
4263    // archive).  Absent a better API for this, or some time zone locking
4264    // mechanism, we'll have to live with this risk.
4265    TIME_ZONE_INFORMATION tz;
4266    DWORD tzid = GetTimeZoneInformation(&tz);
4267    int daylightBias =
4268      (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4269    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4270  }
4271  return ret;
4272}
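// Worked example of the adjustment above: for US Eastern time in summer,
// GetTimeZoneInformation() reports Bias = 300 (UTC-5) and DaylightBias = -60,
// so st_mtime is shifted by (300 + (-60)) * 60 = 14400 seconds, producing the
// same timestamp no matter which timezone the machine is configured for.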
4273
4274
4275#define FT2INT64(ft) \
4276  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4277
4278
4279// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4280// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4281// of a thread.
4282//
4283// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4284// the fast estimate available on the platform.
4285
4286// current_thread_cpu_time() is not optimized for Windows yet
4287jlong os::current_thread_cpu_time() {
4288  // return user + sys since the cost is the same
4289  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4290}
4291
4292jlong os::thread_cpu_time(Thread* thread) {
4293  // consistent with what current_thread_cpu_time() returns.
4294  return os::thread_cpu_time(thread, true /* user+sys */);
4295}
4296
4297jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4298  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4299}
4300
4301jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4302  // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4303  // If this function changes, os::is_thread_cpu_time_supported() should too
4304  FILETIME CreationTime;
4305  FILETIME ExitTime;
4306  FILETIME KernelTime;
4307  FILETIME UserTime;
4308
4309  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4310                      &ExitTime, &KernelTime, &UserTime) == 0) {
4311    return -1;
4312  } else if (user_sys_cpu_time) {
4313    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4314  } else {
4315    return FT2INT64(UserTime) * 100;
4316  }
4317}
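// Worked example of the conversion above: GetThreadTimes() reports FILETIMEs in
// 100-nanosecond units, so one second of user CPU time arrives as 10,000,000
// units; FT2INT64(UserTime) * 100 = 1,000,000,000, i.e. the result is in
// nanoseconds.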
4318
4319void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4320  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4321  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4322  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4323  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4324}
4325
4326void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4327  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4328  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4329  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4330  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4331}
4332
4333bool os::is_thread_cpu_time_supported() {
4334  // see os::thread_cpu_time
4335  FILETIME CreationTime;
4336  FILETIME ExitTime;
4337  FILETIME KernelTime;
4338  FILETIME UserTime;
4339
4340  if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4341                      &KernelTime, &UserTime) == 0) {
4342    return false;
4343  } else {
4344    return true;
4345  }
4346}
4347
4348// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4349// It does have primitives (PDH API) to get CPU usage and run queue length.
4350// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4351// If we wanted to implement loadavg on Windows, we have a few options:
4352//
4353// a) Query CPU usage and run queue length and "fake" an answer by
4354//    returning the CPU usage if it's under 100%, and the run queue
4355//    length otherwise.  It turns out that querying is pretty slow
4356//    on Windows, on the order of 200 microseconds on a fast machine.
4357//    Note that on Windows the CPU usage value is the % usage
4358//    since the last time the API was called (and the first call
4359//    returns 100%), so we'd have to deal with that as well.
4360//
4361// b) Sample the "fake" answer using a sampling thread and store
4362//    the answer in a global variable.  The call to loadavg would
4363//    just return the value of the global, avoiding the slow query.
4364//
4365// c) Sample a better answer using exponential decay to smooth the
4366//    value.  This is basically the algorithm used by UNIX kernels.
4367//
4368// Note that sampling thread starvation could affect both (b) and (c).
4369int os::loadavg(double loadavg[], int nelem) {
4370  return -1;
4371}
4372
4373
4374// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4375bool os::dont_yield() {
4376  return DontYieldALot;
4377}
4378
4379// This method is a slightly reworked copy of JDK's sysOpen
4380// from src/windows/hpi/src/sys_api_md.c
4381
4382int os::open(const char *path, int oflag, int mode) {
4383  char pathbuf[MAX_PATH];
4384
4385  if (strlen(path) > MAX_PATH - 1) {
4386    errno = ENAMETOOLONG;
4387    return -1;
4388  }
4389  os::native_path(strcpy(pathbuf, path));
4390  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4391}
4392
4393FILE* os::open(int fd, const char* mode) {
4394  return ::_fdopen(fd, mode);
4395}
4396
4397// Is a (classpath) directory empty?
4398bool os::dir_is_empty(const char* path) {
4399  WIN32_FIND_DATA fd;
4400  HANDLE f = FindFirstFile(path, &fd);
4401  if (f == INVALID_HANDLE_VALUE) {
4402    return true;
4403  }
4404  FindClose(f);
4405  return false;
4406}
4407
4408// create binary file, rewriting existing file if required
4409int os::create_binary_file(const char* path, bool rewrite_existing) {
4410  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4411  if (!rewrite_existing) {
4412    oflags |= _O_EXCL;
4413  }
4414  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4415}
4416
4417// return current position of file pointer
4418jlong os::current_file_offset(int fd) {
4419  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4420}
4421
4422// move file pointer to the specified offset
4423jlong os::seek_to_file_offset(int fd, jlong offset) {
4424  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4425}
4426
4427
4428jlong os::lseek(int fd, jlong offset, int whence) {
4429  return (jlong) ::_lseeki64(fd, offset, whence);
4430}
4431
4432size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4433  OVERLAPPED ov;
4434  DWORD nread;
4435  BOOL result;
4436
4437  ZeroMemory(&ov, sizeof(ov));
4438  ov.Offset = (DWORD)offset;
4439  ov.OffsetHigh = (DWORD)(offset >> 32);
4440
4441  HANDLE h = (HANDLE)::_get_osfhandle(fd);
4442
4443  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4444
4445  return result ? nread : 0;
4446}
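// Worked example of the OVERLAPPED offset split above: for offset 0x100000010
// (4GB + 16 bytes), ov.Offset receives the low DWORD 0x00000010 and
// ov.OffsetHigh the high DWORD 0x00000001, so ReadFile starts reading 16 bytes
// past the 4GB mark.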
4447
4448
4449// This method is a slightly reworked copy of JDK's sysNativePath
4450// from src/windows/hpi/src/path_md.c
4451
4452// Convert a pathname to native format.  On win32, this involves forcing all
4453// separators to be '\\' rather than '/' (both are legal inputs, but Win95
4454// sometimes rejects '/') and removing redundant separators.  The input path is
4455// assumed to have been converted into the character encoding used by the local
4456// system.  Because this might be a double-byte encoding, care is taken to
4457// treat double-byte lead characters correctly.
4458//
4459// This procedure modifies the given path in place, as the result is never
4460// longer than the original.  There is no error return; this operation always
4461// succeeds.
4462char * os::native_path(char *path) {
4463  char *src = path, *dst = path, *end = path;
4464  char *colon = NULL;  // If a drive specifier is found, this will
4465                       // point to the colon following the drive letter
4466
4467  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4468  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4469          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4470
4471  // Check for leading separators
4472#define isfilesep(c) ((c) == '/' || (c) == '\\')
4473  while (isfilesep(*src)) {
4474    src++;
4475  }
4476
4477  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4478    // Remove leading separators if followed by drive specifier.  This
4479    // hack is necessary to support file URLs containing drive
4480    // specifiers (e.g., "file://c:/path").  As a side effect,
4481    // "/c:/path" can be used as an alternative to "c:/path".
4482    *dst++ = *src++;
4483    colon = dst;
4484    *dst++ = ':';
4485    src++;
4486  } else {
4487    src = path;
4488    if (isfilesep(src[0]) && isfilesep(src[1])) {
4489      // UNC pathname: Retain first separator; leave src pointed at
4490      // second separator so that further separators will be collapsed
4491      // into the second separator.  The result will be a pathname
4492      // beginning with "\\\\" followed (most likely) by a host name.
4493      src = dst = path + 1;
4494      path[0] = '\\';     // Force first separator to '\\'
4495    }
4496  }
4497
4498  end = dst;
4499
4500  // Remove redundant separators from remainder of path, forcing all
4501  // separators to be '\\' rather than '/'. Also, single byte space
4502  // characters are removed from the end of the path because those
4503  // are not legal ending characters on this operating system.
4504  //
4505  while (*src != '\0') {
4506    if (isfilesep(*src)) {
4507      *dst++ = '\\'; src++;
4508      while (isfilesep(*src)) src++;
4509      if (*src == '\0') {
4510        // Check for trailing separator
4511        end = dst;
4512        if (colon == dst - 2) break;  // "z:\\"
4513        if (dst == path + 1) break;   // "\\"
4514        if (dst == path + 2 && isfilesep(path[0])) {
4515          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4516          // beginning of a UNC pathname.  Even though it is not, by
4517          // itself, a valid UNC pathname, we leave it as is in order
4518          // to be consistent with the path canonicalizer as well
4519          // as the win32 APIs, which treat this case as an invalid
4520          // UNC pathname rather than as an alias for the root
4521          // directory of the current drive.
4522          break;
4523        }
4524        end = --dst;  // Path does not denote a root directory, so
4525                      // remove trailing separator
4526        break;
4527      }
4528      end = dst;
4529    } else {
4530      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4531        *dst++ = *src++;
4532        if (*src) *dst++ = *src++;
4533        end = dst;
4534      } else {  // Copy a single-byte character
4535        char c = *src++;
4536        *dst++ = c;
4537        // Space is not a legal ending character
4538        if (c != ' ') end = dst;
4539      }
4540    }
4541  }
4542
4543  *end = '\0';
4544
4545  // For "z:", add "." to work around a bug in the C runtime library
4546  if (colon == dst - 1) {
4547    path[2] = '.';
4548    path[3] = '\0';
4549  }
4550
4551  return path;
4552}
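// For illustration: typical conversions performed by os::native_path() on
// the example buffers below (the paths are examples, not used anywhere).
#if 0   // illustrative sketch, not compiled
static void native_path_examples() {
  char p1[]  = "a//b/c";         os::native_path(p1);  // -> "a\b\c"        collapse separators
  char p2[]  = "/c:/path/";      os::native_path(p2);  // -> "c:\path"      drop leading and trailing separators
  char p3[]  = "//host//share";  os::native_path(p3);  // -> "\\host\share" UNC prefix preserved
  char p4[4] = "c:";             os::native_path(p4);  // -> "c:."          CRT workaround, needs one spare byte
}
#endif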
4553
4554// This code is a copy of JDK's sysSetLength
4555// from src/windows/hpi/src/sys_api_md.c
4556
4557int os::ftruncate(int fd, jlong length) {
4558  HANDLE h = (HANDLE)::_get_osfhandle(fd);
4559  long high = (long)(length >> 32);
4560  DWORD ret;
4561
4562  if (h == (HANDLE)(-1)) {
4563    return -1;
4564  }
4565
4566  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4567  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4568    return -1;
4569  }
4570
4571  if (::SetEndOfFile(h) == FALSE) {
4572    return -1;
4573  }
4574
4575  return 0;
4576}
4577
4578
4579// This code is a copy of JDK's sysSync
4580// from src/windows/hpi/src/sys_api_md.c
4581// except for the legacy workaround for a bug in Win 98
4582
4583int os::fsync(int fd) {
4584  HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4585
4586  if ((!::FlushFileBuffers(handle)) &&
4587      (GetLastError() != ERROR_ACCESS_DENIED)) {
4588    // from winerror.h
4589    return -1;
4590  }
4591  return 0;
4592}
4593
4594static int nonSeekAvailable(int, long *);
4595static int stdinAvailable(int, long *);
4596
4597#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4598#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4599
4600// This code is a copy of JDK's sysAvailable
4601// from src/windows/hpi/src/sys_api_md.c
4602
4603int os::available(int fd, jlong *bytes) {
4604  jlong cur, end;
4605  struct _stati64 stbuf64;
4606
4607  if (::_fstati64(fd, &stbuf64) >= 0) {
4608    int mode = stbuf64.st_mode;
4609    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4610      int ret;
4611      long lpbytes;
4612      if (fd == 0) {
4613        ret = stdinAvailable(fd, &lpbytes);
4614      } else {
4615        ret = nonSeekAvailable(fd, &lpbytes);
4616      }
4617      (*bytes) = (jlong)(lpbytes);
4618      return ret;
4619    }
4620    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4621      return FALSE;
4622    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4623      return FALSE;
4624    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4625      return FALSE;
4626    }
4627    *bytes = end - cur;
4628    return TRUE;
4629  } else {
4630    return FALSE;
4631  }
4632}
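// For illustration: for a seekable fd whose file pointer is at byte 100 of a
// 250-byte file (example numbers), the code above computes cur == 100 and
// end == 250, restores the file pointer, sets *bytes to 150 and returns TRUE.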
4633
4634// This code is a copy of JDK's nonSeekAvailable
4635// from src/windows/hpi/src/sys_api_md.c
4636
4637static int nonSeekAvailable(int fd, long *pbytes) {
4638  // This is used for available on non-seekable devices
4639  // (like both named and anonymous pipes, such as pipes
4640  //  connected to an exec'd process).
4641  // Standard Input is a special case.
4642  HANDLE han;
4643
4644  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4645    return FALSE;
4646  }
4647
4648  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4649    // PeekNamedPipe fails when at EOF.  In that case we
4650    // simply make *pbytes = 0 which is consistent with the
4651    // behavior we get on Solaris when an fd is at EOF.
4652    // The only alternative is to raise an Exception,
4653    // which isn't really warranted.
4654    //
4655    if (::GetLastError() != ERROR_BROKEN_PIPE) {
4656      return FALSE;
4657    }
4658    *pbytes = 0;
4659  }
4660  return TRUE;
4661}
4662
4663#define MAX_INPUT_EVENTS 2000
4664
4665// This code is a copy of JDK's stdinAvailable
4666// from src/windows/hpi/src/sys_api_md.c
4667
4668static int stdinAvailable(int fd, long *pbytes) {
4669  HANDLE han;
4670  DWORD numEventsRead = 0;  // Number of events read from buffer
4671  DWORD numEvents = 0;      // Number of events in buffer
4672  DWORD i = 0;              // Loop index
4673  DWORD curLength = 0;      // Position marker
4674  DWORD actualLength = 0;   // Number of bytes readable
4675  BOOL error = FALSE;       // Error holder
4676  INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4677
4678  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4679    return FALSE;
4680  }
4681
4682  // Construct an array of input records in the console buffer
4683  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4684  if (error == 0) {
4685    return nonSeekAvailable(fd, pbytes);
4686  }
4687
4688  // lpBuffer must fit into 64K or else PeekConsoleInput fails
4689  if (numEvents > MAX_INPUT_EVENTS) {
4690    numEvents = MAX_INPUT_EVENTS;
4691  }
4692
4693  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4694  if (lpBuffer == NULL) {
4695    return FALSE;
4696  }
4697
4698  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4699  if (error == 0) {
4700    os::free(lpBuffer);
4701    return FALSE;
4702  }
4703
4704  // Examine input records for the number of bytes available
4705  for (i = 0; i < numEventsRead; i++) {
4706    if (lpBuffer[i].EventType == KEY_EVENT) {
4707
4708      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4709                                      &(lpBuffer[i].Event);
4710      if (keyRecord->bKeyDown == TRUE) {
4711        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4712        curLength++;
4713        if (*keyPressed == '\r') {
4714          actualLength = curLength;
4715        }
4716      }
4717    }
4718  }
4719
4720  if (lpBuffer != NULL) {
4721    os::free(lpBuffer);
4722  }
4723
4724  *pbytes = (long) actualLength;
4725  return TRUE;
4726}
4727
4728// Map a block of memory.
4729char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4730                        char *addr, size_t bytes, bool read_only,
4731                        bool allow_exec) {
4732  HANDLE hFile;
4733  char* base;
4734
4735  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4736                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4737  if (hFile == INVALID_HANDLE_VALUE) {
4738    if (PrintMiscellaneous && Verbose) {
4739      DWORD err = GetLastError();
4740      tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
4741    }
4742    return NULL;
4743  }
4744
4745  if (allow_exec) {
4746    // CreateFileMapping/MapViewOfFileEx can't map executable memory
4747    // unless it comes from a PE image (which the shared archive is not.)
4748    // Even VirtualProtect refuses to give execute access to mapped memory
4749    // that was not previously executable.
4750    //
4751    // Instead, stick the executable region in anonymous memory.  Yuck.
4752    // Penalty is that ~4 pages will not be shareable - in the future
4753    // we might consider DLLizing the shared archive with a proper PE
4754    // header so that mapping executable + sharing is possible.
4755
4756    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4757                                PAGE_READWRITE);
4758    if (base == NULL) {
4759      if (PrintMiscellaneous && Verbose) {
4760        DWORD err = GetLastError();
4761        tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
4762      }
4763      CloseHandle(hFile);
4764      return NULL;
4765    }
4766
4767    DWORD bytes_read;
4768    OVERLAPPED overlapped;
4769    overlapped.Offset = (DWORD)file_offset;
4770    overlapped.OffsetHigh = 0;
4771    overlapped.hEvent = NULL;
4772    // ReadFile guarantees that if the return value is true, the requested
4773    // number of bytes were read before returning.
4774    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4775    if (!res) {
4776      if (PrintMiscellaneous && Verbose) {
4777        DWORD err = GetLastError();
4778        tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
4779      }
4780      release_memory(base, bytes);
4781      CloseHandle(hFile);
4782      return NULL;
4783    }
4784  } else {
4785    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4786                                    NULL /* file_name */);
4787    if (hMap == NULL) {
4788      if (PrintMiscellaneous && Verbose) {
4789        DWORD err = GetLastError();
4790        tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
4791      }
4792      CloseHandle(hFile);
4793      return NULL;
4794    }
4795
4796    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4797    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4798                                  (DWORD)bytes, addr);
4799    if (base == NULL) {
4800      if (PrintMiscellaneous && Verbose) {
4801        DWORD err = GetLastError();
4802        tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
4803      }
4804      CloseHandle(hMap);
4805      CloseHandle(hFile);
4806      return NULL;
4807    }
4808
4809    if (CloseHandle(hMap) == 0) {
4810      if (PrintMiscellaneous && Verbose) {
4811        DWORD err = GetLastError();
4812        tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
4813      }
4814      CloseHandle(hFile);
4815      return base;
4816    }
4817  }
4818
4819  if (allow_exec) {
4820    DWORD old_protect;
4821    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4822    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4823
4824    if (!res) {
4825      if (PrintMiscellaneous && Verbose) {
4826        DWORD err = GetLastError();
4827        tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
4828      }
4829      // Don't consider this a hard error; on IA32, even if
4830      // VirtualProtect fails, we should still be able to execute.
4831      CloseHandle(hFile);
4832      return base;
4833    }
4834  }
4835
4836  if (CloseHandle(hFile) == 0) {
4837    if (PrintMiscellaneous && Verbose) {
4838      DWORD err = GetLastError();
4839      tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
4840    }
4841    return base;
4842  }
4843
4844  return base;
4845}
4846
4847
4848// Remap a block of memory.
4849char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4850                          char *addr, size_t bytes, bool read_only,
4851                          bool allow_exec) {
4852  // This OS does not allow existing memory maps to be remapped so we
4853  // have to unmap the memory before we remap it.
4854  if (!os::unmap_memory(addr, bytes)) {
4855    return NULL;
4856  }
4857
4858  // There is a very small theoretical window between the unmap_memory()
4859  // call above and the map_memory() call below where a thread in native
4860  // code may be able to access an address that is no longer mapped.
4861
4862  return os::map_memory(fd, file_name, file_offset, addr, bytes,
4863                        read_only, allow_exec);
4864}
4865
4866
4867// Unmap a block of memory.
4868// Returns true=success, otherwise false.
4869
4870bool os::pd_unmap_memory(char* addr, size_t bytes) {
4871  MEMORY_BASIC_INFORMATION mem_info;
4872  if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4873    if (PrintMiscellaneous && Verbose) {
4874      DWORD err = GetLastError();
4875      tty->print_cr("VirtualQuery() failed: GetLastError->%ld.", err);
4876    }
4877    return false;
4878  }
4879
4880  // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4881  // Instead, the executable region was allocated using VirtualAlloc(). See
4882  // pd_map_memory() above.
4883  //
4884  // The following flags should match the 'exec_access' flags used for
4885  // VirtualProtect() in pd_map_memory().
4886  if (mem_info.Protect == PAGE_EXECUTE_READ ||
4887      mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4888    return pd_release_memory(addr, bytes);
4889  }
4890
4891  BOOL result = UnmapViewOfFile(addr);
4892  if (result == 0) {
4893    if (PrintMiscellaneous && Verbose) {
4894      DWORD err = GetLastError();
4895      tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err);
4896    }
4897    return false;
4898  }
4899  return true;
4900}
4901
4902void os::pause() {
4903  char filename[MAX_PATH];
4904  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4905    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4906  } else {
4907    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4908  }
4909
4910  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4911  if (fd != -1) {
4912    struct stat buf;
4913    ::close(fd);
4914    while (::stat(filename, &buf) == 0) {
4915      Sleep(100);
4916    }
4917  } else {
4918    jio_fprintf(stderr,
4919                "Could not open pause file '%s', continuing immediately.\n", filename);
4920  }
4921}
4922
4923os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4924  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4925}
4926
4927// See the caveats for this class in os_windows.hpp
4928// Protects the callback call so that a raised OS EXCEPTION causes a jump
4929// back into this method, which then returns false. If no OS EXCEPTION was
4930// raised, returns true.
4931// The callback is supposed to provide the method that should be protected.
4932//
4933bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4934  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4935  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4936         "crash_protection already set?");
4937
4938  bool success = true;
4939  __try {
4940    WatcherThread::watcher_thread()->set_crash_protection(this);
4941    cb.call();
4942  } __except(EXCEPTION_EXECUTE_HANDLER) {
4943    // only for protection, nothing to do
4944    success = false;
4945  }
4946  WatcherThread::watcher_thread()->set_crash_protection(NULL);
4947  return success;
4948}
4949
4950// An Event wraps a win32 "CreateEvent" kernel handle.
4951//
4952// We have a number of choices regarding "CreateEvent" win32 handle leakage:
4953//
4954// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4955//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
4956//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4957//     In addition, an unpark() operation might fetch the handle field, but the
4958//     event could recycle between the fetch and the SetEvent() operation.
4959//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
4960//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
4961//     on a stale but recycled handle would be harmless, but in practice this might
4962//     confuse other non-Sun code, so it's not a viable approach.
4963//
4964// 2:  Once a win32 event handle is associated with an Event, it remains associated
4965//     with the Event.  The event handle is never closed.  This could be construed
4966//     as handle leakage, but only up to the maximum # of threads that have been extant
4967//     at any one time.  This shouldn't be an issue, as Windows platforms typically
4968//     permit a process to have hundreds of thousands of open handles.
4969//
4970// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
4971//     and release unused handles.
4972//
4973// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
4974//     It's not clear, however, that we wouldn't be trading one type of leak for another.
4975//
4976// 5.  Use an RCU-like mechanism (Read-Copy Update).
4977//     Or perhaps something similar to Maged Michael's "Hazard pointers".
4978//
4979// We use (2).
4980//
4981// TODO-FIXME:
4982// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
4983// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
4984//     to recover from (or at least detect) the dreaded Windows 841176 bug.
4985// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
4986//     into a single win32 CreateEvent() handle.
4987//
4988// Assumption:
4989//    Only one parker can exist on an event, which is why we allocate
4990//    them per-thread. Multiple unparkers can coexist.
4991//
4992// _Event transitions in park()
4993//   -1 => -1 : illegal
4994//    1 =>  0 : pass - return immediately
4995//    0 => -1 : block; then set _Event to 0 before returning
4996//
4997// _Event transitions in unpark()
4998//    0 => 1 : just return
4999//    1 => 1 : just return
5000//   -1 => either 0 or 1; must signal target thread
5001//         That is, we can safely transition _Event from -1 to either
5002//         0 or 1.
5003//
5004// _Event serves as a restricted-range semaphore.
5005//   -1 : thread is blocked, i.e. there is a waiter
5006//    0 : neutral: thread is running or ready,
5007//        could have been signaled after a wait started
5008//    1 : signaled - thread is running or ready
5009//
5010// Another possible encoding of _Event would be with
5011// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5012//
5013
5014int os::PlatformEvent::park(jlong Millis) {
5015  // Transitions for _Event:
5016  //   -1 => -1 : illegal
5017  //    1 =>  0 : pass - return immediately
5018  //    0 => -1 : block; then set _Event to 0 before returning
5019
5020  guarantee(_ParkHandle != NULL , "Invariant");
5021  guarantee(Millis > 0          , "Invariant");
5022
5023  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5024  // the initial park() operation.
5025  // Consider: use atomic decrement instead of CAS-loop
5026
5027  int v;
5028  for (;;) {
5029    v = _Event;
5030    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5031  }
5032  guarantee((v == 0) || (v == 1), "invariant");
5033  if (v != 0) return OS_OK;
5034
5035  // Do this the hard way by blocking ...
5036  // TODO: consider a brief spin here, gated on the success of recent
5037  // spin attempts by this thread.
5038  //
5039  // We decompose long timeouts into a series of shorter timed waits.
5040  // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5041  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5042  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5043  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5044  // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5045  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5046  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5047  // for the already waited time.  This policy does not admit any new outcomes.
5048  // In the future, however, we might want to track the accumulated wait time and
5049  // adjust Millis accordingly if we encounter a spurious wakeup.
5050
5051  const int MAXTIMEOUT = 0x10000000;
5052  DWORD rv = WAIT_TIMEOUT;
5053  while (_Event < 0 && Millis > 0) {
5054    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5055    if (Millis > MAXTIMEOUT) {
5056      prd = MAXTIMEOUT;
5057    }
5058    rv = ::WaitForSingleObject(_ParkHandle, prd);
5059    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5060    if (rv == WAIT_TIMEOUT) {
5061      Millis -= prd;
5062    }
5063  }
5064  v = _Event;
5065  _Event = 0;
5066  // see comment at end of os::PlatformEvent::park() below:
5067  OrderAccess::fence();
5068  // If we encounter a nearly simultaneous timeout expiry and unpark()
5069  // we return OS_OK indicating we awoke via unpark().
5070  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5071  return (v >= 0) ? OS_OK : OS_TIMEOUT;
5072}
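// For illustration: with MAXTIMEOUT == 0x10000000 ms (roughly 3.1 days), a
// timeout of Millis == 0x28000000 (an example value) is waited out in three
// chunks, absent an intervening unpark():
//   WaitForSingleObject(_ParkHandle, 0x10000000);   // Millis becomes 0x18000000
//   WaitForSingleObject(_ParkHandle, 0x10000000);   // Millis becomes 0x08000000
//   WaitForSingleObject(_ParkHandle, 0x08000000);   // Millis becomes 0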
5073
5074void os::PlatformEvent::park() {
5075  // Transitions for _Event:
5076  //   -1 => -1 : illegal
5077  //    1 =>  0 : pass - return immediately
5078  //    0 => -1 : block; then set _Event to 0 before returning
5079
5080  guarantee(_ParkHandle != NULL, "Invariant");
5081  // Invariant: Only the thread associated with the Event/PlatformEvent
5082  // may call park().
5083  // Consider: use atomic decrement instead of CAS-loop
5084  int v;
5085  for (;;) {
5086    v = _Event;
5087    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5088  }
5089  guarantee((v == 0) || (v == 1), "invariant");
5090  if (v != 0) return;
5091
5092  // Do this the hard way by blocking ...
5093  // TODO: consider a brief spin here, gated on the success of recent
5094  // spin attempts by this thread.
5095  while (_Event < 0) {
5096    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5097    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5098  }
5099
5100  // Usually we'll find _Event == 0 at this point, but as
5101  // an optional optimization we clear it, just in case
5102  // multiple unpark() operations drove _Event up to 1.
5103  _Event = 0;
5104  OrderAccess::fence();
5105  guarantee(_Event >= 0, "invariant");
5106}
5107
5108void os::PlatformEvent::unpark() {
5109  guarantee(_ParkHandle != NULL, "Invariant");
5110
5111  // Transitions for _Event:
5112  //    0 => 1 : just return
5113  //    1 => 1 : just return
5114  //   -1 => either 0 or 1; must signal target thread
5115  //         That is, we can safely transition _Event from -1 to either
5116  //         0 or 1.
5117  // See also: "Semaphores in Plan 9" by Mullender & Cox
5118  //
5119  // Note: Forcing a transition from "-1" to "1" on an unpark() means
5120  // that it will take two back-to-back park() calls for the owning
5121  // thread to block. This has the benefit of forcing a spurious return
5122  // from the first park() call after an unpark() call which will help
5123  // shake out uses of park() and unpark() without condition variables.
5124
5125  if (Atomic::xchg(1, &_Event) >= 0) return;
5126
5127  ::SetEvent(_ParkHandle);
5128}
5129
5130
5131// JSR166
5132// -------------------------------------------------------
5133
5134// The Windows implementation of Park is very straightforward: Basic
5135// operations on Win32 Events turn out to have the right semantics to
5136// use them directly. We opportunistically reuse the event inherited
5137// from Monitor.
5138
5139void Parker::park(bool isAbsolute, jlong time) {
5140  guarantee(_ParkEvent != NULL, "invariant");
5141  // First, demultiplex/decode time arguments
5142  if (time < 0) { // don't wait
5143    return;
5144  } else if (time == 0 && !isAbsolute) {
5145    time = INFINITE;
5146  } else if (isAbsolute) {
5147    time -= os::javaTimeMillis(); // convert to relative time
5148    if (time <= 0) {  // already elapsed
5149      return;
5150    }
5151  } else { // relative
5152    time /= 1000000;  // Must coarsen from nanos to millis
5153    if (time == 0) {  // Wait for the minimal time unit if zero
5154      time = 1;
5155    }
5156  }
5157
5158  JavaThread* thread = JavaThread::current();
5159
5160  // Don't wait if interrupted or already triggered
5161  if (Thread::is_interrupted(thread, false) ||
5162      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5163    ResetEvent(_ParkEvent);
5164    return;
5165  } else {
5166    ThreadBlockInVM tbivm(thread);
5167    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5168    thread->set_suspend_equivalent();
5169
5170    WaitForSingleObject(_ParkEvent, time);
5171    ResetEvent(_ParkEvent);
5172
5173    // If externally suspended while waiting, re-suspend
5174    if (thread->handle_special_suspend_equivalent_condition()) {
5175      thread->java_suspend_self();
5176    }
5177  }
5178}
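// For illustration, how the time argument above is decoded (example values):
//   isAbsolute == false, time == 1500000 ns -> 1500000 / 1000000 == 1 ms wait
//   isAbsolute == false, time == 500 ns     -> coarsens to 0, bumped to the 1 ms minimum
//   isAbsolute == false, time == 0          -> INFINITE wait
//   isAbsolute == true,  deadline at or before javaTimeMillis() -> return immediately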
5179
5180void Parker::unpark() {
5181  guarantee(_ParkEvent != NULL, "invariant");
5182  SetEvent(_ParkEvent);
5183}
5184
5185// Run the specified command in a separate process. Return its exit value,
5186// or -1 on failure (e.g. can't create a new process).
5187int os::fork_and_exec(char* cmd) {
5188  STARTUPINFO si;
5189  PROCESS_INFORMATION pi;
5190
5191  memset(&si, 0, sizeof(si));
5192  si.cb = sizeof(si);
5193  memset(&pi, 0, sizeof(pi));
5194  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5195                            cmd,    // command line
5196                            NULL,   // process security attribute
5197                            NULL,   // thread security attribute
5198                            TRUE,   // inherits system handles
5199                            0,      // no creation flags
5200                            NULL,   // use parent's environment block
5201                            NULL,   // use parent's starting directory
5202                            &si,    // (in) startup information
5203                            &pi);   // (out) process information
5204
5205  if (rslt) {
5206    // Wait until child process exits.
5207    WaitForSingleObject(pi.hProcess, INFINITE);
5208
5209    DWORD exit_code;
5210    GetExitCodeProcess(pi.hProcess, &exit_code);
5211
5212    // Close process and thread handles.
5213    CloseHandle(pi.hProcess);
5214    CloseHandle(pi.hThread);
5215
5216    return (int)exit_code;
5217  } else {
5218    return -1;
5219  }
5220}
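// For illustration only: a usage sketch of os::fork_and_exec().  The command
// string is an arbitrary example; a mutable buffer is used because the
// parameter is a plain char*.
#if 0   // illustrative sketch, not compiled
static void fork_and_exec_example() {
  char cmd[] = "cmd.exe /c echo hello";
  int exit_code = os::fork_and_exec(cmd);
  if (exit_code == -1) {
    // CreateProcess failed; nothing was run
  }
}
#endif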
5221
5222//--------------------------------------------------------------------------------------------------
5223// Non-product code
5224
5225static int mallocDebugIntervalCounter = 0;
5226static int mallocDebugCounter = 0;
5227bool os::check_heap(bool force) {
5228  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
5229  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
5230    // Note: HeapValidate executes two hardware breakpoints when it finds something
5231    // wrong; at these points, eax contains the address of the offending block (I think).
5232    // To get to the explicit error message(s) below, just continue twice.
5233    //
5234    // Note:  we want to check the CRT heap, which is not necessarily located in the
5235    // process default heap.
5236    HANDLE heap = (HANDLE) _get_heap_handle();
5237    if (!heap) {
5238      return true;
5239    }
5240
5241    // If we fail to lock the heap, then gflags.exe has been used
5242    // or some other special heap flag has been set that prevents
5243    // locking. We don't try to walk a heap we can't lock.
5244    if (HeapLock(heap) != 0) {
5245      PROCESS_HEAP_ENTRY phe;
5246      phe.lpData = NULL;
5247      while (HeapWalk(heap, &phe) != 0) {
5248        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
5249            !HeapValidate(heap, 0, phe.lpData)) {
5250          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
5251          tty->print_cr("corrupted block near address " PTR_FORMAT ", length %lu", p2i(phe.lpData), phe.cbData);
5252          HeapUnlock(heap);
5253          fatal("corrupted C heap");
5254        }
5255      }
5256      DWORD err = GetLastError();
5257      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
5258        HeapUnlock(heap);
5259        fatal("heap walk aborted with error %d", err);
5260      }
5261      HeapUnlock(heap);
5262    }
5263    mallocDebugIntervalCounter = 0;
5264  }
5265  return true;
5266}
5267
5268
5269bool os::find(address addr, outputStream* st) {
5270  int offset = -1;
5271  bool result = false;
5272  char buf[256];
5273  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5274    st->print(PTR_FORMAT " ", addr);
5275    if (strlen(buf) < sizeof(buf) - 1) {
5276      char* p = strrchr(buf, '\\');
5277      if (p) {
5278        st->print("%s", p + 1);
5279      } else {
5280        st->print("%s", buf);
5281      }
5282    } else {
5283      // The library name is probably truncated. Let's omit the library name.
5284      // See also JDK-8147512.
5285    }
5286    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5287      st->print("::%s + 0x%x", buf, offset);
5288    }
5289    st->cr();
5290    result = true;
5291  }
5292  return result;
5293}
5294
5295LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5296  DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5297
5298  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5299    JavaThread* thread = JavaThread::current();
5300    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5301    address addr = (address) exceptionRecord->ExceptionInformation[1];
5302
5303    if (os::is_memory_serialize_page(thread, addr)) {
5304      return EXCEPTION_CONTINUE_EXECUTION;
5305    }
5306  }
5307
5308  return EXCEPTION_CONTINUE_SEARCH;
5309}
5310
5311// We don't build a headless jre for Windows
5312bool os::is_headless_jre() { return false; }
5313
5314static jint initSock() {
5315  WSADATA wsadata;
5316
5317  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5318    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5319                ::GetLastError());
5320    return JNI_ERR;
5321  }
5322  return JNI_OK;
5323}
5324
5325struct hostent* os::get_host_by_name(char* name) {
5326  return (struct hostent*)gethostbyname(name);
5327}
5328
5329int os::socket_close(int fd) {
5330  return ::closesocket(fd);
5331}
5332
5333int os::socket(int domain, int type, int protocol) {
5334  return ::socket(domain, type, protocol);
5335}
5336
5337int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5338  return ::connect(fd, him, len);
5339}
5340
5341int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5342  return ::recv(fd, buf, (int)nBytes, flags);
5343}
5344
5345int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5346  return ::send(fd, buf, (int)nBytes, flags);
5347}
5348
5349int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5350  return ::send(fd, buf, (int)nBytes, flags);
5351}
5352
5353// WINDOWS CONTEXT Flags for THREAD_SAMPLING
5354#if defined(IA32)
5355  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5356#elif defined (AMD64)
5357  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5358#endif
5359
5360// returns true if thread could be suspended,
5361// false otherwise
5362static bool do_suspend(HANDLE* h) {
5363  if (h != NULL) {
5364    if (SuspendThread(*h) != ~0) {
5365      return true;
5366    }
5367  }
5368  return false;
5369}
5370
5371// resume the thread
5372// calling resume on an active thread is a no-op
5373static void do_resume(HANDLE* h) {
5374  if (h != NULL) {
5375    ResumeThread(*h);
5376  }
5377}
5378
5379// retrieve a suspend/resume context capable handle
5380// from the tid. Caller validates handle return value.
5381void get_thread_handle_for_extended_context(HANDLE* h,
5382                                            OSThread::thread_id_t tid) {
5383  if (h != NULL) {
5384    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5385  }
5386}
5387
5388// Thread sampling implementation
5389//
5390void os::SuspendedThreadTask::internal_do_task() {
5391  CONTEXT    ctxt;
5392  HANDLE     h = NULL;
5393
5394  // get context capable handle for thread
5395  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5396
5397  // sanity
5398  if (h == NULL || h == INVALID_HANDLE_VALUE) {
5399    return;
5400  }
5401
5402  // suspend the thread
5403  if (do_suspend(&h)) {
5404    ctxt.ContextFlags = sampling_context_flags;
5405    // get thread context
5406    GetThreadContext(h, &ctxt);
5407    SuspendedThreadTaskContext context(_thread, &ctxt);
5408    // pass context to Thread Sampling impl
5409    do_task(context);
5410    // resume thread
5411    do_resume(&h);
5412  }
5413
5414  // close handle
5415  CloseHandle(h);
5416}
5417
5418bool os::start_debugging(char *buf, int buflen) {
5419  int len = (int)strlen(buf);
5420  char *p = &buf[len];
5421
5422  jio_snprintf(p, buflen-len,
5423             "\n\n"
5424             "Do you want to debug the problem?\n\n"
5425             "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5426             "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5427             "Otherwise, select 'No' to abort...",
5428             os::current_process_id(), os::current_thread_id());
5429
5430  bool yes = os::message_box("Unexpected Error", buf);
5431
5432  if (yes) {
5433    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5434    // exception. If VM is running inside a debugger, the debugger will
5435    // catch the exception. Otherwise, the breakpoint exception will reach
5436    // the default windows exception handler, which can spawn a debugger and
5437    // automatically attach to the dying VM.
5438    os::breakpoint();
5439    yes = false;
5440  }
5441  return yes;
5442}
5443
5444void* os::get_default_process_handle() {
5445  return (void*)GetModuleHandle(NULL);
5446}
5447
5448// Builds a platform dependent Agent_OnLoad_<lib_name> function name
5449// which is used to find statically linked in agents.
5450// Additionally for windows, takes into account __stdcall names.
5451// Parameters:
5452//            sym_name: Symbol in library we are looking for
5453//            lib_name: Name of library to look in, NULL for shared libs.
5454//            is_absolute_path == true if lib_name is absolute path to agent
5455//                                     such as "C:/a/b/L.dll"
5456//            == false if only the base name of the library is passed in
5457//               such as "L"
5458char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5459                                    bool is_absolute_path) {
5460  char *agent_entry_name;
5461  size_t len;
5462  size_t name_len;
5463  size_t prefix_len = strlen(JNI_LIB_PREFIX);
5464  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5465  const char *start;
5466
5467  if (lib_name != NULL) {
5468    len = name_len = strlen(lib_name);
5469    if (is_absolute_path) {
5470      // Need to strip path, prefix and suffix
5471      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5472        lib_name = ++start;
5473      } else {
5474        // Need to check for drive prefix
5475        if ((start = strchr(lib_name, ':')) != NULL) {
5476          lib_name = ++start;
5477        }
5478      }
5479      if (len <= (prefix_len + suffix_len)) {
5480        return NULL;
5481      }
5482      lib_name += prefix_len;
5483      name_len = strlen(lib_name) - suffix_len;
5484    }
5485  }
5486  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5487  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5488  if (agent_entry_name == NULL) {
5489    return NULL;
5490  }
5491  if (lib_name != NULL) {
5492    const char *p = strrchr(sym_name, '@');
5493    if (p != NULL && p != sym_name) {
5494      // sym_name == _Agent_OnLoad@XX
5495      strncpy(agent_entry_name, sym_name, (p - sym_name));
5496      agent_entry_name[(p-sym_name)] = '\0';
5497      // agent_entry_name == _Agent_OnLoad
5498      strcat(agent_entry_name, "_");
5499      strncat(agent_entry_name, lib_name, name_len);
5500      strcat(agent_entry_name, p);
5501      // agent_entry_name == _Agent_OnLoad_lib_name@XX
5502    } else {
5503      strcpy(agent_entry_name, sym_name);
5504      strcat(agent_entry_name, "_");
5505      strncat(agent_entry_name, lib_name, name_len);
5506    }
5507  } else {
5508    strcpy(agent_entry_name, sym_name);
5509  }
5510  return agent_entry_name;
5511}
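// For illustration: expected results for two example argument sets (the
// library names below are hypothetical):
//   build_agent_function_name("Agent_OnLoad", "L", false)
//       -> "Agent_OnLoad_L"
//   build_agent_function_name("_Agent_OnLoad@16", "C:\\dir\\L.dll", true)
//       -> "_Agent_OnLoad_L@16"   (path and ".dll" suffix stripped, __stdcall suffix preserved)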
5512
5513#ifndef PRODUCT
5514
5515// test the code path in reserve_memory_special() that tries to allocate memory in a single
5516// contiguous memory block at a particular address.
5517// The test first tries to find a good approximate address to allocate at by using the same
5518// method to allocate some memory at any address. The test then tries to allocate memory in
5519// the vicinity (not directly after it, to avoid possible by-chance use of that location).
5520// This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5521// the previously allocated memory is available for allocation. The only actual failure
5522// that is reported is when the test tries to allocate at a particular location but gets a
5523// different valid one. A NULL return value at this point is not considered an error but may
5524// be legitimate.
5525// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5526void TestReserveMemorySpecial_test() {
5527  if (!UseLargePages) {
5528    if (VerboseInternalVMTests) {
5529      tty->print("Skipping test because large pages are disabled");
5530    }
5531    return;
5532  }
5533  // save current value of globals
5534  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5535  bool old_use_numa_interleaving = UseNUMAInterleaving;
5536
5537  // set globals to make sure we hit the correct code path
5538  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5539
5540  // do an allocation at an address selected by the OS to get a good one.
5541  const size_t large_allocation_size = os::large_page_size() * 4;
5542  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5543  if (result == NULL) {
5544    if (VerboseInternalVMTests) {
5545      tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5546                          large_allocation_size);
5547    }
5548  } else {
5549    os::release_memory_special(result, large_allocation_size);
5550
5551    // allocate another page within the recently allocated memory area which seems to be a good location. At least
5552    // we managed to get it once.
5553    const size_t expected_allocation_size = os::large_page_size();
5554    char* expected_location = result + os::large_page_size();
5555    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5556    if (actual_location == NULL) {
5557      if (VerboseInternalVMTests) {
5558        tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5559                            expected_location, large_allocation_size);
5560      }
5561    } else {
5562      // release memory
5563      os::release_memory_special(actual_location, expected_allocation_size);
5564      // only now check, after releasing any memory to avoid any leaks.
5565      assert(actual_location == expected_location,
5566             "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5567             expected_location, expected_allocation_size, actual_location);
5568    }
5569  }
5570
5571  // restore globals
5572  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5573  UseNUMAInterleaving = old_use_numa_interleaving;
5574}
5575#endif // PRODUCT
5576
5577/*
5578  All the defined signal names for Windows.
5579
5580  NOTE that not all of these names are accepted by FindSignal!
5581
5582  For various reasons some of these may be rejected at runtime.
5583
5584  Here are the names currently accepted by a user of sun.misc.Signal with
5585  1.4.1 (ignoring potential interaction with use of chaining, etc):
5586
5587     (LIST TBD)
5588
5589*/
5590int os::get_signal_number(const char* name) {
5591  static const struct {
5592    const char* name;
5593    int   number;
5594  } siglabels [] =
5595    // derived from version 6.0 VC98/include/signal.h
5596  {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5597  "FPE",        SIGFPE,         // floating point exception
5598  "SEGV",       SIGSEGV,        // segment violation
5599  "INT",        SIGINT,         // interrupt
5600  "TERM",       SIGTERM,        // software term signal from kill
5601  "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5602  "ILL",        SIGILL};        // illegal instruction
5603  for (size_t i = 0; i < sizeof(siglabels) / sizeof(siglabels[0]); i++)
5604    if (strcmp(name, siglabels[i].name) == 0)
5605      return siglabels[i].number;
5606  return -1;
5607}
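// For illustration only: a usage sketch of os::get_signal_number().
#if 0   // illustrative sketch, not compiled
static void get_signal_number_example() {
  int sig = os::get_signal_number("TERM");   // returns SIGTERM
  int bad = os::get_signal_number("HUP");    // not in the table above: returns -1
}
#endif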
5608
5609// Fast current thread access
5610
5611int os::win32::_thread_ptr_offset = 0;
5612
5613static void call_wrapper_dummy() {}
5614
5615// We need to call the os_exception_wrapper once so that it sets
5616// up the offset from FS of the thread pointer.
5617void os::win32::initialize_thread_ptr_offset() {
5618  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5619                           NULL, NULL, NULL, NULL);
5620}
5621