os_windows.cpp revision 11954:b69381e24635
1/*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
26#define _WIN32_WINNT 0x0600
27
28// no precompiled headers
29#include "classfile/classLoader.hpp"
30#include "classfile/systemDictionary.hpp"
31#include "classfile/vmSymbols.hpp"
32#include "code/icBuffer.hpp"
33#include "code/vtableStubs.hpp"
34#include "compiler/compileBroker.hpp"
35#include "compiler/disassembler.hpp"
36#include "interpreter/interpreter.hpp"
37#include "jvm_windows.h"
38#include "logging/log.hpp"
39#include "memory/allocation.inline.hpp"
40#include "memory/filemap.hpp"
41#include "oops/oop.inline.hpp"
42#include "os_share_windows.hpp"
43#include "os_windows.inline.hpp"
44#include "prims/jniFastGetField.hpp"
45#include "prims/jvm.h"
46#include "prims/jvm_misc.hpp"
47#include "runtime/arguments.hpp"
48#include "runtime/atomic.hpp"
49#include "runtime/extendedPC.hpp"
50#include "runtime/globals.hpp"
51#include "runtime/interfaceSupport.hpp"
52#include "runtime/java.hpp"
53#include "runtime/javaCalls.hpp"
54#include "runtime/mutexLocker.hpp"
55#include "runtime/objectMonitor.hpp"
56#include "runtime/orderAccess.inline.hpp"
57#include "runtime/osThread.hpp"
58#include "runtime/perfMemory.hpp"
59#include "runtime/sharedRuntime.hpp"
60#include "runtime/statSampler.hpp"
61#include "runtime/stubRoutines.hpp"
62#include "runtime/thread.inline.hpp"
63#include "runtime/threadCritical.hpp"
64#include "runtime/timer.hpp"
65#include "runtime/vm_version.hpp"
66#include "semaphore_windows.hpp"
67#include "services/attachListener.hpp"
68#include "services/memTracker.hpp"
69#include "services/runtimeService.hpp"
70#include "utilities/decoder.hpp"
71#include "utilities/defaultStream.hpp"
72#include "utilities/events.hpp"
73#include "utilities/growableArray.hpp"
74#include "utilities/macros.hpp"
75#include "utilities/vmError.hpp"
76
77#ifdef _DEBUG
78#include <crtdbg.h>
79#endif
80
81
82#include <windows.h>
83#include <sys/types.h>
84#include <sys/stat.h>
85#include <sys/timeb.h>
86#include <objidl.h>
87#include <shlobj.h>
88
89#include <malloc.h>
90#include <signal.h>
91#include <direct.h>
92#include <errno.h>
93#include <fcntl.h>
94#include <io.h>
95#include <process.h>              // For _beginthreadex(), _endthreadex()
96#include <imagehlp.h>             // For os::dll_address_to_function_name
97// for enumerating dll libraries
98#include <vdmdbg.h>
99
100// for timer info max values which include all bits
101#define ALL_64_BITS CONST64(-1)
102
103// For DLL loading/load error detection
104// Offsets defined by the PE/COFF file format
105#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
106#define IMAGE_FILE_SIGNATURE_LENGTH 4
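// A rough sketch of the on-disk layout these offsets refer to (the
// authoritative definitions live in <winnt.h>); os::dll_load() below uses
// them to find out which architecture a DLL that failed to load was built for:
//
//   file offset 0x00 : IMAGE_DOS_HEADER  ("MZ" ...)
//   file offset 0x3c : e_lfanew          - 4-byte offset of the PE signature
//   e_lfanew         : "PE\0\0"          - 4-byte signature
//   e_lfanew + 4     : IMAGE_FILE_HEADER - its first field, Machine, holds the
//                                          IMAGE_FILE_MACHINE_* architecture code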
107
108static HANDLE main_process;
109static HANDLE main_thread;
110static int    main_thread_id;
111
112static FILETIME process_creation_time;
113static FILETIME process_exit_time;
114static FILETIME process_user_time;
115static FILETIME process_kernel_time;
116
117#ifdef _M_IA64
118  #define __CPU__ ia64
119#else
120  #ifdef _M_AMD64
121    #define __CPU__ amd64
122  #else
123    #define __CPU__ i486
124  #endif
125#endif
126
127// save DLL module handle, used by GetModuleFileName
128
129HINSTANCE vm_lib_handle;
130
131BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
132  switch (reason) {
133  case DLL_PROCESS_ATTACH:
134    vm_lib_handle = hinst;
135    if (ForceTimeHighResolution) {
136      timeBeginPeriod(1L);
137    }
138    break;
139  case DLL_PROCESS_DETACH:
140    if (ForceTimeHighResolution) {
141      timeEndPeriod(1L);
142    }
143    break;
144  default:
145    break;
146  }
147  return true;
148}
149
150static inline double fileTimeAsDouble(FILETIME* time) {
151  const double high  = (double) ((unsigned int) ~0);
152  const double split = 10000000.0;
153  double result = (time->dwLowDateTime / split) +
154                   time->dwHighDateTime * (high/split);
155  return result;
156}
157
158// Implementation of os
159
160bool os::unsetenv(const char* name) {
161  assert(name != NULL, "Null pointer");
162  return (SetEnvironmentVariable(name, NULL) == TRUE);
163}
164
165// No setuid programs under Windows.
166bool os::have_special_privileges() {
167  return false;
168}
169
170
171// This method is a periodic task to check for misbehaving JNI applications
172// under CheckJNI; we can add any periodic checks here.
173// On Windows it currently does nothing.
174void os::run_periodic_checks() {
175  return;
176}
177
178// previous UnhandledExceptionFilter, if there is one
179static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
180
181LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
182
183void os::init_system_properties_values() {
184  // sysclasspath, java_home, dll_dir
185  {
186    char *home_path;
187    char *dll_path;
188    char *pslash;
189    char *bin = "\\bin";
190    char home_dir[MAX_PATH + 1];
191    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
192
193    if (alt_home_dir != NULL)  {
194      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
195      home_dir[MAX_PATH] = '\0';
196    } else {
197      os::jvm_path(home_dir, sizeof(home_dir));
198      // Found the full path to jvm.dll.
199      // Now cut the path to <java_home>/jre if we can.
200      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
201      pslash = strrchr(home_dir, '\\');
202      if (pslash != NULL) {
203        *pslash = '\0';                   // get rid of \{client|server}
204        pslash = strrchr(home_dir, '\\');
205        if (pslash != NULL) {
206          *pslash = '\0';                 // get rid of \bin
207        }
208      }
209    }
210
211    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
212    if (home_path == NULL) {
213      return;
214    }
215    strcpy(home_path, home_dir);
216    Arguments::set_java_home(home_path);
217    FREE_C_HEAP_ARRAY(char, home_path);
218
219    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
220                                mtInternal);
221    if (dll_path == NULL) {
222      return;
223    }
224    strcpy(dll_path, home_dir);
225    strcat(dll_path, bin);
226    Arguments::set_dll_dir(dll_path);
227    FREE_C_HEAP_ARRAY(char, dll_path);
228
229    if (!set_boot_path('\\', ';')) {
230      return;
231    }
232  }
233
234// library_path
235#define EXT_DIR "\\lib\\ext"
236#define BIN_DIR "\\bin"
237#define PACKAGE_DIR "\\Sun\\Java"
238  {
239    // Win32 library search order (See the documentation for LoadLibrary):
240    //
241    // 1. The directory from which the application is loaded.
242    // 2. The system wide Java Extensions directory (Java only)
243    // 3. System directory (GetSystemDirectory)
244    // 4. Windows directory (GetWindowsDirectory)
245    // 5. The PATH environment variable
246    // 6. The current directory
247
248    char *library_path;
249    char tmp[MAX_PATH];
250    char *path_str = ::getenv("PATH");
251
252    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
253                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
254
255    library_path[0] = '\0';
256
257    GetModuleFileName(NULL, tmp, sizeof(tmp));
258    *(strrchr(tmp, '\\')) = '\0';
259    strcat(library_path, tmp);
260
261    GetWindowsDirectory(tmp, sizeof(tmp));
262    strcat(library_path, ";");
263    strcat(library_path, tmp);
264    strcat(library_path, PACKAGE_DIR BIN_DIR);
265
266    GetSystemDirectory(tmp, sizeof(tmp));
267    strcat(library_path, ";");
268    strcat(library_path, tmp);
269
270    GetWindowsDirectory(tmp, sizeof(tmp));
271    strcat(library_path, ";");
272    strcat(library_path, tmp);
273
274    if (path_str) {
275      strcat(library_path, ";");
276      strcat(library_path, path_str);
277    }
278
279    strcat(library_path, ";.");
280
281    Arguments::set_library_path(library_path);
282    FREE_C_HEAP_ARRAY(char, library_path);
283  }
284
285  // Default extensions directory
286  {
287    char path[MAX_PATH];
288    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
289    GetWindowsDirectory(path, MAX_PATH);
290    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
291            path, PACKAGE_DIR, EXT_DIR);
292    Arguments::set_ext_dirs(buf);
293  }
294  #undef EXT_DIR
295  #undef BIN_DIR
296  #undef PACKAGE_DIR
297
298#ifndef _WIN64
299  // set our UnhandledExceptionFilter and save any previous one
300  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
301#endif
302
303  // Done
304  return;
305}
306
307void os::breakpoint() {
308  DebugBreak();
309}
310
311// Invoked from the BREAKPOINT Macro
312extern "C" void breakpoint() {
313  os::breakpoint();
314}
315
316// The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
317// So far, this method is only used by Native Memory Tracking, which is
318// only supported on Windows XP or later.
319//
320int os::get_native_stack(address* stack, int frames, int toSkip) {
321  int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
322  for (int index = captured; index < frames; index ++) {
323    stack[index] = NULL;
324  }
325  return captured;
326}
327
328
329// os::current_stack_base()
330//
331//   Returns the base of the stack, which is the stack's
332//   starting address.  This function must be called
333//   while running on the stack of the thread being queried.
334
335address os::current_stack_base() {
336  MEMORY_BASIC_INFORMATION minfo;
337  address stack_bottom;
338  size_t stack_size;
339
340  VirtualQuery(&minfo, &minfo, sizeof(minfo));
341  stack_bottom =  (address)minfo.AllocationBase;
342  stack_size = minfo.RegionSize;
343
344  // Add up the sizes of all the regions with the same
345  // AllocationBase.
346  while (1) {
347    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
348    if (stack_bottom == (address)minfo.AllocationBase) {
349      stack_size += minfo.RegionSize;
350    } else {
351      break;
352    }
353  }
354
355#ifdef _M_IA64
356  // IA64 has memory and register stacks
357  //
358  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
359  // at thread creation (1MB backing store growing upwards, 1MB memory stack
360  // growing downwards, 2MB summed up)
361  //
362  // ...
363  // ------- top of stack (high address) -----
364  // |
365  // |      1MB
366  // |      Backing Store (Register Stack)
367  // |
368  // |         / \
369  // |          |
370  // |          |
371  // |          |
372  // ------------------------ stack base -----
373  // |      1MB
374  // |      Memory Stack
375  // |
376  // |          |
377  // |          |
378  // |          |
379  // |         \ /
380  // |
381  // ----- bottom of stack (low address) -----
382  // ...
383
384  stack_size = stack_size / 2;
385#endif
386  return stack_bottom + stack_size;
387}
388
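// Illustrative relationship between the values above (x86/x64, where the
// stack grows toward lower addresses):
//
//   AllocationBase                                       returned "base"
//   (lowest address) |------------- stack_size -------------| (highest address)
//
// os::current_stack_size() below computes essentially "base - AllocationBase"
// for the region containing the current stack pointer.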
389size_t os::current_stack_size() {
390  size_t sz;
391  MEMORY_BASIC_INFORMATION minfo;
392  VirtualQuery(&minfo, &minfo, sizeof(minfo));
393  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
394  return sz;
395}
396
397struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
398  const struct tm* time_struct_ptr = localtime(clock);
399  if (time_struct_ptr != NULL) {
400    *res = *time_struct_ptr;
401    return res;
402  }
403  return NULL;
404}
405
406LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
407
408// Thread start routine for all newly created threads
409static unsigned __stdcall thread_native_entry(Thread* thread) {
410  // Try to randomize the cache line index of hot stack frames.
411  // This helps when threads with the same stack traces evict each other's
412  // cache lines. The threads can be either from the same JVM instance, or
413  // from different JVM instances. The benefit is especially noticeable for
414  // processors with hyperthreading technology.
415  static int counter = 0;
416  int pid = os::current_process_id();
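  // ((pid ^ counter) & 7) picks one of eight values 0..7, so successive
  // threads shift their initial frame by 0..896 bytes in 128-byte steps,
  // landing hot frames on different cache lines.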
417  _alloca(((pid ^ counter++) & 7) * 128);
418
419  thread->initialize_thread_current();
420
421  OSThread* osthr = thread->osthread();
422  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
423
424  if (UseNUMA) {
425    int lgrp_id = os::numa_get_group_id();
426    if (lgrp_id != -1) {
427      thread->set_lgrp_id(lgrp_id);
428    }
429  }
430
431  // Diagnostic code to investigate JDK-6573254
432  int res = 30115;  // non-java thread
433  if (thread->is_Java_thread()) {
434    res = 20115;    // java thread
435  }
436
437  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
438
439  // Install a win32 structured exception handler around every thread created
440  // by the VM, so the VM can generate an error dump when an exception occurs
441  // in a non-Java thread (e.g. the VM thread).
442  __try {
443    thread->run();
444  } __except(topLevelExceptionFilter(
445                                     (_EXCEPTION_POINTERS*)_exception_info())) {
446    // Nothing to do.
447  }
448
449  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
450
451  // One less thread is executing
452  // When the VMThread gets here, the main thread may have already exited
453  // which frees the CodeHeap containing the Atomic::add code
454  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
455    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
456  }
457
458  // If a thread has not deleted itself ("delete this") as part of its
459  // termination sequence, we have to ensure thread-local-storage is
460  // cleared before we actually terminate. No threads should ever be
461  // deleted asynchronously with respect to their termination.
462  if (Thread::current_or_null_safe() != NULL) {
463    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
464    thread->clear_thread_current();
465  }
466
467  // Thread must not return from exit_process_or_thread(), but if it does,
468  // let it proceed to exit normally
469  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
470}
471
472static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
473                                  int thread_id) {
474  // Allocate the OSThread object
475  OSThread* osthread = new OSThread(NULL, NULL);
476  if (osthread == NULL) return NULL;
477
478  // Initialize support for Java interrupts
479  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
480  if (interrupt_event == NULL) {
481    delete osthread;
482    return NULL;
483  }
484  osthread->set_interrupt_event(interrupt_event);
485
486  // Store info on the Win32 thread into the OSThread
487  osthread->set_thread_handle(thread_handle);
488  osthread->set_thread_id(thread_id);
489
490  if (UseNUMA) {
491    int lgrp_id = os::numa_get_group_id();
492    if (lgrp_id != -1) {
493      thread->set_lgrp_id(lgrp_id);
494    }
495  }
496
497  // Initial thread state is INITIALIZED, not SUSPENDED
498  osthread->set_state(INITIALIZED);
499
500  return osthread;
501}
502
503
504bool os::create_attached_thread(JavaThread* thread) {
505#ifdef ASSERT
506  thread->verify_not_published();
507#endif
508  HANDLE thread_h;
509  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
510                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
511    fatal("DuplicateHandle failed\n");
512  }
513  OSThread* osthread = create_os_thread(thread, thread_h,
514                                        (int)current_thread_id());
515  if (osthread == NULL) {
516    return false;
517  }
518
519  // Initial thread state is RUNNABLE
520  osthread->set_state(RUNNABLE);
521
522  thread->set_osthread(osthread);
523
524  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
525    os::current_thread_id());
526
527  return true;
528}
529
530bool os::create_main_thread(JavaThread* thread) {
531#ifdef ASSERT
532  thread->verify_not_published();
533#endif
534  if (_starting_thread == NULL) {
535    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
536    if (_starting_thread == NULL) {
537      return false;
538    }
539  }
540
541  // The primordial thread is runnable from the start
542  _starting_thread->set_state(RUNNABLE);
543
544  thread->set_osthread(_starting_thread);
545  return true;
546}
547
548// Helper function to trace _beginthreadex attributes,
549//  similar to os::Posix::describe_pthread_attr()
550static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
551                                               size_t stacksize, unsigned initflag) {
552  stringStream ss(buf, buflen);
553  if (stacksize == 0) {
554    ss.print("stacksize: default, ");
555  } else {
556    ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
557  }
558  ss.print("flags: ");
559  #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
560  #define ALL(X) \
561    X(CREATE_SUSPENDED) \
562    X(STACK_SIZE_PARAM_IS_A_RESERVATION)
563  ALL(PRINT_FLAG)
564  #undef ALL
565  #undef PRINT_FLAG
566  return buf;
567}
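
// A sample of the attribute string the helper above builds (before any
// truncation to the caller's buffer), e.g. for a 512k stack with the flags
// used in os::create_thread() below (illustrative only):
//
//   "stacksize: 512k, flags: CREATE_SUSPENDED STACK_SIZE_PARAM_IS_A_RESERVATION "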
568
569// Allocate and initialize a new OSThread
570bool os::create_thread(Thread* thread, ThreadType thr_type,
571                       size_t stack_size) {
572  unsigned thread_id;
573
574  // Allocate the OSThread object
575  OSThread* osthread = new OSThread(NULL, NULL);
576  if (osthread == NULL) {
577    return false;
578  }
579
580  // Initialize support for Java interrupts
581  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
582  if (interrupt_event == NULL) {
583    delete osthread;
584    return false;
585  }
586  osthread->set_interrupt_event(interrupt_event);
587  osthread->set_interrupted(false);
588
589  thread->set_osthread(osthread);
590
591  if (stack_size == 0) {
592    switch (thr_type) {
593    case os::java_thread:
594      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
595      if (JavaThread::stack_size_at_create() > 0) {
596        stack_size = JavaThread::stack_size_at_create();
597      }
598      break;
599    case os::compiler_thread:
600      if (CompilerThreadStackSize > 0) {
601        stack_size = (size_t)(CompilerThreadStackSize * K);
602        break;
603      } // else fall through:
604        // use VMThreadStackSize if CompilerThreadStackSize is not defined
605    case os::vm_thread:
606    case os::pgc_thread:
607    case os::cgc_thread:
608    case os::watcher_thread:
609      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
610      break;
611    }
612  }
613
614  // Create the Win32 thread
615  //
616  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
617  // does not specify the stack size. Instead, it specifies the size of the
618  // initially committed space. The stack size is determined by the
619  // PE header in the executable. If the committed "stack_size" is larger
620  // than the default value in the PE header, the stack is rounded up to the
621  // nearest multiple of 1MB. For example, if the launcher has a default
622  // stack size of 320k, specifying any size less than 320k does not
623  // affect the actual stack size at all; it only affects the initial
624  // commitment. On the other hand, specifying a 'stack_size' larger than the
625  // default value may cause a significant increase in memory usage, because
626  // not only is the stack space rounded up to a multiple of 1MB, but the
627  // entire space is also committed upfront.
628  //
629  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
630  // for CreateThread() that can treat 'stack_size' as stack size. However we
631  // are not supposed to call CreateThread() directly according to the MSDN
632  // documentation because the JVM uses the C runtime library. The good news is
633  // that the flag appears to work with _beginthreadex() as well.
634
635  const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
636  HANDLE thread_handle =
637    (HANDLE)_beginthreadex(NULL,
638                           (unsigned)stack_size,
639                           (unsigned (__stdcall *)(void*)) thread_native_entry,
640                           thread,
641                           initflag,
642                           &thread_id);
643
644  char buf[64];
645  if (thread_handle != NULL) {
646    log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
647      thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
648  } else {
649    log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
650      os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
651  }
652
653  if (thread_handle == NULL) {
654    // Need to clean up stuff we've allocated so far
655    CloseHandle(osthread->interrupt_event());
656    thread->set_osthread(NULL);
657    delete osthread;
658    return false;
659  }
660
661  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
662
663  // Store info on the Win32 thread into the OSThread
664  osthread->set_thread_handle(thread_handle);
665  osthread->set_thread_id(thread_id);
666
667  // Initial thread state is INITIALIZED, not SUSPENDED
668  osthread->set_state(INITIALIZED);
669
670  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
671  return true;
672}
673
674
675// Free Win32 resources related to the OSThread
676void os::free_thread(OSThread* osthread) {
677  assert(osthread != NULL, "osthread not set");
678
679  // We are told to free resources of the argument thread,
680  // but we can only really operate on the current thread.
681  assert(Thread::current()->osthread() == osthread,
682         "os::free_thread but not current thread");
683
684  CloseHandle(osthread->thread_handle());
685  CloseHandle(osthread->interrupt_event());
686  delete osthread;
687}
688
689static jlong first_filetime;
690static jlong initial_performance_count;
691static jlong performance_frequency;
692
693
694jlong as_long(LARGE_INTEGER x) {
695  jlong result = 0; // initialization to avoid warning
696  set_high(&result, x.HighPart);
697  set_low(&result, x.LowPart);
698  return result;
699}
700
701
702jlong os::elapsed_counter() {
703  LARGE_INTEGER count;
704  QueryPerformanceCounter(&count);
705  return as_long(count) - initial_performance_count;
706}
707
708
709jlong os::elapsed_frequency() {
710  return performance_frequency;
711}
712
713
714julong os::available_memory() {
715  return win32::available_memory();
716}
717
718julong os::win32::available_memory() {
719  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
720  // value if total memory is larger than 4GB
721  MEMORYSTATUSEX ms;
722  ms.dwLength = sizeof(ms);
723  GlobalMemoryStatusEx(&ms);
724
725  return (julong)ms.ullAvailPhys;
726}
727
728julong os::physical_memory() {
729  return win32::physical_memory();
730}
731
732bool os::has_allocatable_memory_limit(julong* limit) {
733  MEMORYSTATUSEX ms;
734  ms.dwLength = sizeof(ms);
735  GlobalMemoryStatusEx(&ms);
736#ifdef _LP64
737  *limit = (julong)ms.ullAvailVirtual;
738  return true;
739#else
740  // Limit to 1400m because of the 2gb address space wall
741  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
742  return true;
743#endif
744}
745
746int os::active_processor_count() {
747  DWORD_PTR lpProcessAffinityMask = 0;
748  DWORD_PTR lpSystemAffinityMask = 0;
749  int proc_count = processor_count();
750  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
751      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
752    // The number of active processors is the number of set bits in the process affinity mask
753    int bitcount = 0;
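    // Each loop iteration clears the lowest set bit of the mask
    // (Kernighan's population-count idiom).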
754    while (lpProcessAffinityMask != 0) {
755      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
756      bitcount++;
757    }
758    return bitcount;
759  } else {
760    return proc_count;
761  }
762}
763
764void os::set_native_thread_name(const char *name) {
765
766  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
767  //
768  // Note that unfortunately this only works if the process
769  // is already attached to a debugger; the debugger must observe
770  // the exception below to show the correct name.
771
772  const DWORD MS_VC_EXCEPTION = 0x406D1388;
773  struct {
774    DWORD dwType;     // must be 0x1000
775    LPCSTR szName;    // pointer to name (in user addr space)
776    DWORD dwThreadID; // thread ID (-1=caller thread)
777    DWORD dwFlags;    // reserved for future use, must be zero
778  } info;
779
780  info.dwType = 0x1000;
781  info.szName = name;
782  info.dwThreadID = -1;
783  info.dwFlags = 0;
784
785  __try {
786    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
787  } __except(EXCEPTION_CONTINUE_EXECUTION) {}
788}
789
790bool os::distribute_processes(uint length, uint* distribution) {
791  // Not yet implemented.
792  return false;
793}
794
795bool os::bind_to_processor(uint processor_id) {
796  // Not yet implemented.
797  return false;
798}
799
800void os::win32::initialize_performance_counter() {
801  LARGE_INTEGER count;
802  QueryPerformanceFrequency(&count);
803  performance_frequency = as_long(count);
804  QueryPerformanceCounter(&count);
805  initial_performance_count = as_long(count);
806}
807
808
809double os::elapsedTime() {
810  return (double) elapsed_counter() / (double) elapsed_frequency();
811}
812
813
814// Windows format:
815//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
816// Java format:
817//   Java standards require the number of milliseconds since 1/1/1970
818
819// Constant offset - calculated using offset()
820static jlong  _offset   = 116444736000000000;
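// The constant can be derived by hand: 1601-01-01 to 1970-01-01 spans 369
// years containing 89 leap days, i.e. 369 * 365 + 89 = 134774 days,
// 134774 * 86400 = 11644473600 seconds, and at 10^7 100-ns ticks per second
// that is 116444736000000000 -- the value of _offset above.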
821// Fake time counter for reproducible results when debugging
822static jlong  fake_time = 0;
823
824#ifdef ASSERT
825// Just to be safe, recalculate the offset in debug mode
826static jlong _calculated_offset = 0;
827static int   _has_calculated_offset = 0;
828
829jlong offset() {
830  if (_has_calculated_offset) return _calculated_offset;
831  SYSTEMTIME java_origin;
832  java_origin.wYear          = 1970;
833  java_origin.wMonth         = 1;
834  java_origin.wDayOfWeek     = 0; // ignored
835  java_origin.wDay           = 1;
836  java_origin.wHour          = 0;
837  java_origin.wMinute        = 0;
838  java_origin.wSecond        = 0;
839  java_origin.wMilliseconds  = 0;
840  FILETIME jot;
841  if (!SystemTimeToFileTime(&java_origin, &jot)) {
842    fatal("Error = %d\nWindows error", GetLastError());
843  }
844  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
845  _has_calculated_offset = 1;
846  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
847  return _calculated_offset;
848}
849#else
850jlong offset() {
851  return _offset;
852}
853#endif
854
855jlong windows_to_java_time(FILETIME wt) {
856  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
857  return (a - offset()) / 10000;
858}
859
860// Returns time ticks in tenths of microseconds (i.e. 100-ns units)
861jlong windows_to_time_ticks(FILETIME wt) {
862  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
863  return (a - offset());
864}
865
866FILETIME java_to_windows_time(jlong l) {
867  jlong a = (l * 10000) + offset();
868  FILETIME result;
869  result.dwHighDateTime = high(a);
870  result.dwLowDateTime  = low(a);
871  return result;
872}
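
// A minimal sketch of how the conversions above relate (illustrative only,
// not VM code):
//
//   FILETIME now;
//   GetSystemTimeAsFileTime(&now);
//   jlong ms       = windows_to_java_time(now);   // ms since 1970-01-01
//   FILETIME again = java_to_windows_time(ms);    // back to FILETIME
//
// The round trip is exact only to millisecond resolution, because
// windows_to_java_time() divides the 100-ns tick count by 10000.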
873
874bool os::supports_vtime() { return true; }
875bool os::enable_vtime() { return false; }
876bool os::vtime_enabled() { return false; }
877
878double os::elapsedVTime() {
879  FILETIME created;
880  FILETIME exited;
881  FILETIME kernel;
882  FILETIME user;
883  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
884    // the resolution of windows_to_java_time() should be sufficient (ms)
885    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
886  } else {
887    return elapsedTime();
888  }
889}
890
891jlong os::javaTimeMillis() {
892  if (UseFakeTimers) {
893    return fake_time++;
894  } else {
895    FILETIME wt;
896    GetSystemTimeAsFileTime(&wt);
897    return windows_to_java_time(wt);
898  }
899}
900
901void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
902  FILETIME wt;
903  GetSystemTimeAsFileTime(&wt);
904  jlong ticks = windows_to_time_ticks(wt); // 10th of micros
905  jlong secs = jlong(ticks / 10000000); // 10000 * 1000
906  seconds = secs;
907  nanos = jlong(ticks - (secs*10000000)) * 100;
908}
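
// Worked example of the split above (illustrative only): for
// ticks = 14763330000012345 (100-ns units since 1970),
//   secs  = ticks / 10000000                = 1476333000 seconds
//   nanos = (ticks - secs * 10000000) * 100 = 1234500 nanoseconds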
909
910jlong os::javaTimeNanos() {
911    LARGE_INTEGER current_count;
912    QueryPerformanceCounter(&current_count);
913    double current = as_long(current_count);
914    double freq = performance_frequency;
915    jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
916    return time;
917}
918
919void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
920  jlong freq = performance_frequency;
921  if (freq < NANOSECS_PER_SEC) {
922    // the performance counter is 64 bits and we will
923    // be multiplying it -- so no wrap in 64 bits
924    info_ptr->max_value = ALL_64_BITS;
925  } else if (freq > NANOSECS_PER_SEC) {
926    // use the max value the counter can reach to
927    // determine the max value which could be returned
928    julong max_counter = (julong)ALL_64_BITS;
929    info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
930  } else {
931    // the performance counter is 64 bits and we will
932    // be using it directly -- so no wrap in 64 bits
933    info_ptr->max_value = ALL_64_BITS;
934  }
935
936  // using a counter, so no skipping
937  info_ptr->may_skip_backward = false;
938  info_ptr->may_skip_forward = false;
939
940  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
941}
942
943char* os::local_time_string(char *buf, size_t buflen) {
944  SYSTEMTIME st;
945  GetLocalTime(&st);
946  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
947               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
948  return buf;
949}
950
951bool os::getTimesSecs(double* process_real_time,
952                      double* process_user_time,
953                      double* process_system_time) {
954  HANDLE h_process = GetCurrentProcess();
955  FILETIME create_time, exit_time, kernel_time, user_time;
956  BOOL result = GetProcessTimes(h_process,
957                                &create_time,
958                                &exit_time,
959                                &kernel_time,
960                                &user_time);
961  if (result != 0) {
962    FILETIME wt;
963    GetSystemTimeAsFileTime(&wt);
964    jlong rtc_millis = windows_to_java_time(wt);
965    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
966    *process_user_time =
967      (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
968    *process_system_time =
969      (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
970    return true;
971  } else {
972    return false;
973  }
974}
975
976void os::shutdown() {
977  // allow PerfMemory to attempt cleanup of any persistent resources
978  perfMemory_exit();
979
980  // flush buffered output, finish log files
981  ostream_abort();
982
983  // Check for abort hook
984  abort_hook_t abort_hook = Arguments::abort_hook();
985  if (abort_hook != NULL) {
986    abort_hook();
987  }
988}
989
990
991static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
992                                         PMINIDUMP_EXCEPTION_INFORMATION,
993                                         PMINIDUMP_USER_STREAM_INFORMATION,
994                                         PMINIDUMP_CALLBACK_INFORMATION);
995
996static HANDLE dumpFile = NULL;
997
998// Check if dump file can be created.
999void os::check_dump_limit(char* buffer, size_t buffsz) {
1000  bool status = true;
1001  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1002    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1003    status = false;
1004  }
1005
1006#ifndef ASSERT
1007  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1008    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1009    status = false;
1010  }
1011#endif
1012
1013  if (status) {
1014    const char* cwd = get_current_directory(NULL, 0);
1015    int pid = current_process_id();
1016    if (cwd != NULL) {
1017      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1018    } else {
1019      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1020    }
1021
1022    if (dumpFile == NULL &&
1023       (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1024                 == INVALID_HANDLE_VALUE) {
1025      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1026      status = false;
1027    }
1028  }
1029  VMError::record_coredump_status(buffer, status);
1030}
1031
1032void os::abort(bool dump_core, void* siginfo, const void* context) {
1033  HINSTANCE dbghelp;
1034  EXCEPTION_POINTERS ep;
1035  MINIDUMP_EXCEPTION_INFORMATION mei;
1036  MINIDUMP_EXCEPTION_INFORMATION* pmei;
1037
1038  HANDLE hProcess = GetCurrentProcess();
1039  DWORD processId = GetCurrentProcessId();
1040  MINIDUMP_TYPE dumpType;
1041
1042  shutdown();
1043  if (!dump_core || dumpFile == NULL) {
1044    if (dumpFile != NULL) {
1045      CloseHandle(dumpFile);
1046    }
1047    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1048  }
1049
1050  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
1051
1052  if (dbghelp == NULL) {
1053    jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1054    CloseHandle(dumpFile);
1055    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1056  }
1057
1058  _MiniDumpWriteDump =
1059      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1060                                    PMINIDUMP_EXCEPTION_INFORMATION,
1061                                    PMINIDUMP_USER_STREAM_INFORMATION,
1062                                    PMINIDUMP_CALLBACK_INFORMATION),
1063                                    GetProcAddress(dbghelp,
1064                                    "MiniDumpWriteDump"));
1065
1066  if (_MiniDumpWriteDump == NULL) {
1067    jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1068    CloseHandle(dumpFile);
1069    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1070  }
1071
1072  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1073    MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1074
1075  if (siginfo != NULL && context != NULL) {
1076    ep.ContextRecord = (PCONTEXT) context;
1077    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1078
1079    mei.ThreadId = GetCurrentThreadId();
1080    mei.ExceptionPointers = &ep;
1081    pmei = &mei;
1082  } else {
1083    pmei = NULL;
1084  }
1085
1086  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1087  // the dump types we really want. If the first call fails, fall back to just MiniDumpWithFullMemory.
1088  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1089      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1090    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1091  }
1092  CloseHandle(dumpFile);
1093  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1094}
1095
1096// Die immediately, no exit hook, no abort hook, no cleanup.
1097void os::die() {
1098  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1099}
1100
1101// Directory routines copied from src/win32/native/java/io/dirent_md.c
1102//  * dirent_md.c       1.15 00/02/02
1103//
1104// The declarations for DIR and struct dirent are in jvm_win32.h.
1105
1106// Caller must have already run dirname through JVM_NativePath, which removes
1107// duplicate slashes and converts all instances of '/' into '\\'.
1108
1109DIR * os::opendir(const char *dirname) {
1110  assert(dirname != NULL, "just checking");   // hotspot change
1111  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1112  DWORD fattr;                                // hotspot change
1113  char alt_dirname[4] = { 0, 0, 0, 0 };
1114
1115  if (dirp == 0) {
1116    errno = ENOMEM;
1117    return 0;
1118  }
1119
1120  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1121  // as a directory in FindFirstFile().  We detect this case here and
1122  // prepend the current drive name.
1123  //
1124  if (dirname[1] == '\0' && dirname[0] == '\\') {
1125    alt_dirname[0] = _getdrive() + 'A' - 1;
1126    alt_dirname[1] = ':';
1127    alt_dirname[2] = '\\';
1128    alt_dirname[3] = '\0';
1129    dirname = alt_dirname;
1130  }
1131
1132  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1133  if (dirp->path == 0) {
1134    free(dirp);
1135    errno = ENOMEM;
1136    return 0;
1137  }
1138  strcpy(dirp->path, dirname);
1139
1140  fattr = GetFileAttributes(dirp->path);
1141  if (fattr == 0xffffffff) {
1142    free(dirp->path);
1143    free(dirp);
1144    errno = ENOENT;
1145    return 0;
1146  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1147    free(dirp->path);
1148    free(dirp);
1149    errno = ENOTDIR;
1150    return 0;
1151  }
1152
1153  // Append "*.*", or possibly "\\*.*", to path
1154  if (dirp->path[1] == ':' &&
1155      (dirp->path[2] == '\0' ||
1156      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1157    // No '\\' needed for cases like "Z:" or "Z:\"
1158    strcat(dirp->path, "*.*");
1159  } else {
1160    strcat(dirp->path, "\\*.*");
1161  }
1162
1163  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1164  if (dirp->handle == INVALID_HANDLE_VALUE) {
1165    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1166      free(dirp->path);
1167      free(dirp);
1168      errno = EACCES;
1169      return 0;
1170    }
1171  }
1172  return dirp;
1173}
1174
1175// parameter dbuf unused on Windows
1176struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1177  assert(dirp != NULL, "just checking");      // hotspot change
1178  if (dirp->handle == INVALID_HANDLE_VALUE) {
1179    return 0;
1180  }
1181
1182  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1183
1184  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1185    if (GetLastError() == ERROR_INVALID_HANDLE) {
1186      errno = EBADF;
1187      return 0;
1188    }
1189    FindClose(dirp->handle);
1190    dirp->handle = INVALID_HANDLE_VALUE;
1191  }
1192
1193  return &dirp->dirent;
1194}
1195
1196int os::closedir(DIR *dirp) {
1197  assert(dirp != NULL, "just checking");      // hotspot change
1198  if (dirp->handle != INVALID_HANDLE_VALUE) {
1199    if (!FindClose(dirp->handle)) {
1200      errno = EBADF;
1201      return -1;
1202    }
1203    dirp->handle = INVALID_HANDLE_VALUE;
1204  }
1205  free(dirp->path);
1206  free(dirp);
1207  return 0;
1208}
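
// A minimal usage sketch for the directory routines above (illustrative only;
// the path is hypothetical, and dbuf is unused on Windows but part of the
// shared os API):
//
//   DIR* dir = os::opendir("C:\\some\\dir");
//   if (dir != NULL) {
//     struct dirent dbuf;
//     struct dirent* entry;
//     while ((entry = os::readdir(dir, &dbuf)) != NULL) {
//       tty->print_cr("%s", entry->d_name);    // one name per directory entry
//     }
//     os::closedir(dir);
//   }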
1209
1210// This must be hard coded because it's the system's temporary
1211// directory, not the java application's temp directory (a la java.io.tmpdir).
1212const char* os::get_temp_directory() {
1213  static char path_buf[MAX_PATH];
1214  if (GetTempPath(MAX_PATH, path_buf) > 0) {
1215    return path_buf;
1216  } else {
1217    path_buf[0] = '\0';
1218    return path_buf;
1219  }
1220}
1221
1222static bool file_exists(const char* filename) {
1223  if (filename == NULL || strlen(filename) == 0) {
1224    return false;
1225  }
1226  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1227}
1228
1229bool os::dll_build_name(char *buffer, size_t buflen,
1230                        const char* pname, const char* fname) {
1231  bool retval = false;
1232  const size_t pnamelen = pname ? strlen(pname) : 0;
1233  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1234
1235  // Return error on buffer overflow.
1236  if (pnamelen + strlen(fname) + 10 > buflen) {
1237    return retval;
1238  }
1239
1240  if (pnamelen == 0) {
1241    jio_snprintf(buffer, buflen, "%s.dll", fname);
1242    retval = true;
1243  } else if (c == ':' || c == '\\') {
1244    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1245    retval = true;
1246  } else if (strchr(pname, *os::path_separator()) != NULL) {
1247    int n;
1248    char** pelements = split_path(pname, &n);
1249    if (pelements == NULL) {
1250      return false;
1251    }
1252    for (int i = 0; i < n; i++) {
1253      char* path = pelements[i];
1254      // Really shouldn't be NULL, but check can't hurt
1255      size_t plen = (path == NULL) ? 0 : strlen(path);
1256      if (plen == 0) {
1257        continue; // skip the empty path values
1258      }
1259      const char lastchar = path[plen - 1];
1260      if (lastchar == ':' || lastchar == '\\') {
1261        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1262      } else {
1263        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1264      }
1265      if (file_exists(buffer)) {
1266        retval = true;
1267        break;
1268      }
1269    }
1270    // release the storage
1271    for (int i = 0; i < n; i++) {
1272      if (pelements[i] != NULL) {
1273        FREE_C_HEAP_ARRAY(char, pelements[i]);
1274      }
1275    }
1276    if (pelements != NULL) {
1277      FREE_C_HEAP_ARRAY(char*, pelements);
1278    }
1279  } else {
1280    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1281    retval = true;
1282  }
1283  return retval;
1284}
1285
1286// Needs to be in the OS-specific directory because Windows requires an
1287// additional header file, <direct.h>.
1288const char* os::get_current_directory(char *buf, size_t buflen) {
1289  int n = static_cast<int>(buflen);
1290  if (buflen > INT_MAX)  n = INT_MAX;
1291  return _getcwd(buf, n);
1292}
1293
1294//-----------------------------------------------------------
1295// Helper functions for fatal error handler
1296#ifdef _WIN64
1297// Helper routine which returns true if the address is
1298// within the NTDLL address space.
1299//
1300static bool _addr_in_ntdll(address addr) {
1301  HMODULE hmod;
1302  MODULEINFO minfo;
1303
1304  hmod = GetModuleHandle("NTDLL.DLL");
1305  if (hmod == NULL) return false;
1306  if (!GetModuleInformation(GetCurrentProcess(), hmod,
1307                                          &minfo, sizeof(MODULEINFO))) {
1308    return false;
1309  }
1310
1311  if ((addr >= minfo.lpBaseOfDll) &&
1312      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1313    return true;
1314  } else {
1315    return false;
1316  }
1317}
1318#endif
1319
1320struct _modinfo {
1321  address addr;
1322  char*   full_path;   // point to a char buffer
1323  int     buflen;      // size of the buffer
1324  address base_addr;
1325};
1326
1327static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1328                                  address top_address, void * param) {
1329  struct _modinfo *pmod = (struct _modinfo *)param;
1330  if (!pmod) return -1;
1331
1332  if (base_addr   <= pmod->addr &&
1333      top_address > pmod->addr) {
1334    // if a buffer is provided, copy path name to the buffer
1335    if (pmod->full_path) {
1336      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1337    }
1338    pmod->base_addr = base_addr;
1339    return 1;
1340  }
1341  return 0;
1342}
1343
1344bool os::dll_address_to_library_name(address addr, char* buf,
1345                                     int buflen, int* offset) {
1346  // buf is not optional, but offset is optional
1347  assert(buf != NULL, "sanity check");
1348
1349// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1350//       return the full path to the DLL file; sometimes it returns the path
1351//       to the corresponding PDB file (debug info), and sometimes it only
1352//       returns a partial path, which makes life painful.
1353
1354  struct _modinfo mi;
1355  mi.addr      = addr;
1356  mi.full_path = buf;
1357  mi.buflen    = buflen;
1358  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1359    // buf already contains path name
1360    if (offset) *offset = addr - mi.base_addr;
1361    return true;
1362  }
1363
1364  buf[0] = '\0';
1365  if (offset) *offset = -1;
1366  return false;
1367}
1368
1369bool os::dll_address_to_function_name(address addr, char *buf,
1370                                      int buflen, int *offset,
1371                                      bool demangle) {
1372  // buf is not optional, but offset is optional
1373  assert(buf != NULL, "sanity check");
1374
1375  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1376    return true;
1377  }
1378  if (offset != NULL)  *offset  = -1;
1379  buf[0] = '\0';
1380  return false;
1381}
1382
1383// save the start and end address of jvm.dll into param[0] and param[1]
1384static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1385                           address top_address, void * param) {
1386  if (!param) return -1;
1387
1388  if (base_addr   <= (address)_locate_jvm_dll &&
1389      top_address > (address)_locate_jvm_dll) {
1390    ((address*)param)[0] = base_addr;
1391    ((address*)param)[1] = top_address;
1392    return 1;
1393  }
1394  return 0;
1395}
1396
1397address vm_lib_location[2];    // start and end address of jvm.dll
1398
1399// check if addr is inside jvm.dll
1400bool os::address_is_in_vm(address addr) {
1401  if (!vm_lib_location[0] || !vm_lib_location[1]) {
1402    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1403      assert(false, "Can't find jvm module.");
1404      return false;
1405    }
1406  }
1407
1408  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1409}
1410
1411// print module info; param is outputStream*
1412static int _print_module(const char* fname, address base_address,
1413                         address top_address, void* param) {
1414  if (!param) return -1;
1415
1416  outputStream* st = (outputStream*)param;
1417
1418  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1419  return 0;
1420}
1421
1422// Loads a .dll/.so and, in case of error, checks whether the
1423// .dll/.so was built for the same architecture that Hotspot
1424// is running on.
1425void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1426  void * result = LoadLibrary(name);
1427  if (result != NULL) {
1428    return result;
1429  }
1430
1431  DWORD errcode = GetLastError();
1432  if (errcode == ERROR_MOD_NOT_FOUND) {
1433    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1434    ebuf[ebuflen - 1] = '\0';
1435    return NULL;
1436  }
1437
1438  // Parsing the dll below.
1439  // If we can read the dll info and find that the dll was built
1440  // for an architecture other than the one Hotspot is running on,
1441  // then report "DLL was built for a different architecture" in the buffer;
1442  // otherwise call os::lasterror to obtain the system error message.
1443
1444  // Read system error message into ebuf
1445  // It may or may not be overwritten below (in the for loop and just above)
1446  lasterror(ebuf, (size_t) ebuflen);
1447  ebuf[ebuflen - 1] = '\0';
1448  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1449  if (fd < 0) {
1450    return NULL;
1451  }
1452
1453  uint32_t signature_offset;
1454  uint16_t lib_arch = 0;
1455  bool failed_to_get_lib_arch =
1456    ( // Go to position 3c in the dll
1457     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1458     ||
1459     // Read location of signature
1460     (sizeof(signature_offset) !=
1461     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1462     ||
1463     // Go to COFF File Header in dll
1464     // that is located after "signature" (4 bytes long)
1465     (os::seek_to_file_offset(fd,
1466     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1467     ||
1468     // Read field that contains code of architecture
1469     // that dll was built for
1470     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1471    );
1472
1473  ::close(fd);
1474  if (failed_to_get_lib_arch) {
1475    // file i/o error - report os::lasterror(...) msg
1476    return NULL;
1477  }
1478
1479  typedef struct {
1480    uint16_t arch_code;
1481    char* arch_name;
1482  } arch_t;
1483
1484  static const arch_t arch_array[] = {
1485    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1486    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1487    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1488  };
1489#if   (defined _M_IA64)
1490  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1491#elif (defined _M_AMD64)
1492  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1493#elif (defined _M_IX86)
1494  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1495#else
1496  #error Method os::dll_load requires that one of following \
1497         is defined :_M_IA64,_M_AMD64 or _M_IX86
1498#endif
1499
1500
1501  // Obtain strings for the error message below:
1502  // lib_arch_str names the platform this .dll was built for,
1503  // running_arch_str names the platform Hotspot was built for.
1504  char *running_arch_str = NULL, *lib_arch_str = NULL;
1505  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1506    if (lib_arch == arch_array[i].arch_code) {
1507      lib_arch_str = arch_array[i].arch_name;
1508    }
1509    if (running_arch == arch_array[i].arch_code) {
1510      running_arch_str = arch_array[i].arch_name;
1511    }
1512  }
1513
1514  assert(running_arch_str,
1515         "Didn't find running architecture code in arch_array");
1516
1517  // If the architecture is right
1518  // but some other error took place - report os::lasterror(...) msg
1519  if (lib_arch == running_arch) {
1520    return NULL;
1521  }
1522
1523  if (lib_arch_str != NULL) {
1524    ::_snprintf(ebuf, ebuflen - 1,
1525                "Can't load %s-bit .dll on a %s-bit platform",
1526                lib_arch_str, running_arch_str);
1527  } else {
1528    // don't know what architecture this dll was built for
1529    ::_snprintf(ebuf, ebuflen - 1,
1530                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1531                lib_arch, running_arch_str);
1532  }
1533
1534  return NULL;
1535}
1536
1537void os::print_dll_info(outputStream *st) {
1538  st->print_cr("Dynamic libraries:");
1539  get_loaded_modules_info(_print_module, (void *)st);
1540}
1541
1542int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1543  HANDLE   hProcess;
1544
1545# define MAX_NUM_MODULES 128
1546  HMODULE     modules[MAX_NUM_MODULES];
1547  static char filename[MAX_PATH];
1548  int         result = 0;
1549
1550  int pid = os::current_process_id();
1551  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1552                         FALSE, pid);
1553  if (hProcess == NULL) return 0;
1554
1555  DWORD size_needed;
1556  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1557    CloseHandle(hProcess);
1558    return 0;
1559  }
1560
1561  // number of modules that are currently loaded
1562  int num_modules = size_needed / sizeof(HMODULE);
1563
1564  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1565    // Get Full pathname:
1566    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1567      filename[0] = '\0';
1568    }
1569
1570    MODULEINFO modinfo;
1571    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1572      modinfo.lpBaseOfDll = NULL;
1573      modinfo.SizeOfImage = 0;
1574    }
1575
1576    // Invoke callback function
1577    result = callback(filename, (address)modinfo.lpBaseOfDll,
1578                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1579    if (result) break;
1580  }
1581
1582  CloseHandle(hProcess);
1583  return result;
1584}
1585
1586bool os::get_host_name(char* buf, size_t buflen) {
1587  DWORD size = (DWORD)buflen;
1588  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1589}
1590
1591void os::get_summary_os_info(char* buf, size_t buflen) {
1592  stringStream sst(buf, buflen);
1593  os::win32::print_windows_version(&sst);
1594  // chop off newline character
1595  char* nl = strchr(buf, '\n');
1596  if (nl != NULL) *nl = '\0';
1597}
1598
1599int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1600  int ret = vsnprintf(buf, len, fmt, args);
1601  // Get the correct buffer size if buf is too small
1602  if (ret < 0) {
1603    return _vscprintf(fmt, args);
1604  }
1605  return ret;
1606}
1607
1608static inline time_t get_mtime(const char* filename) {
1609  struct stat st;
1610  int ret = os::stat(filename, &st);
1611  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1612  return st.st_mtime;
1613}
1614
1615int os::compare_file_modified_times(const char* file1, const char* file2) {
1616  time_t t1 = get_mtime(file1);
1617  time_t t2 = get_mtime(file2);
1618  return t1 - t2;
1619}
1620
1621void os::print_os_info_brief(outputStream* st) {
1622  os::print_os_info(st);
1623}
1624
1625void os::print_os_info(outputStream* st) {
1626#ifdef ASSERT
1627  char buffer[1024];
1628  st->print("HostName: ");
1629  if (get_host_name(buffer, sizeof(buffer))) {
1630    st->print("%s ", buffer);
1631  } else {
1632    st->print("N/A ");
1633  }
1634#endif
1635  st->print("OS:");
1636  os::win32::print_windows_version(st);
1637}
1638
1639void os::win32::print_windows_version(outputStream* st) {
1640  OSVERSIONINFOEX osvi;
1641  VS_FIXEDFILEINFO *file_info;
1642  TCHAR kernel32_path[MAX_PATH];
1643  UINT len, ret;
1644
1645  // Use the GetVersionEx information to see if we're on a server or
1646  // workstation edition of Windows. Starting with Windows 8.1 we can't
1647  // trust the OS version information returned by this API.
1648  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1649  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1650  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1651    st->print_cr("Call to GetVersionEx failed");
1652    return;
1653  }
1654  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1655
1656  // Get the full path to \Windows\System32\kernel32.dll and use that for
1657  // determining what version of Windows we're running on.
1658  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1659  ret = GetSystemDirectory(kernel32_path, len);
1660  if (ret == 0 || ret > len) {
1661    st->print_cr("Call to GetSystemDirectory failed");
1662    return;
1663  }
1664  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1665
1666  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1667  if (version_size == 0) {
1668    st->print_cr("Call to GetFileVersionInfoSize failed");
1669    return;
1670  }
1671
1672  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1673  if (version_info == NULL) {
1674    st->print_cr("Failed to allocate version_info");
1675    return;
1676  }
1677
1678  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1679    os::free(version_info);
1680    st->print_cr("Call to GetFileVersionInfo failed");
1681    return;
1682  }
1683
1684  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1685    os::free(version_info);
1686    st->print_cr("Call to VerQueryValue failed");
1687    return;
1688  }
1689
1690  int major_version = HIWORD(file_info->dwProductVersionMS);
1691  int minor_version = LOWORD(file_info->dwProductVersionMS);
1692  int build_number = HIWORD(file_info->dwProductVersionLS);
1693  int build_minor = LOWORD(file_info->dwProductVersionLS);
1694  int os_vers = major_version * 1000 + minor_version;
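  // os_vers encodes major.minor as major * 1000 + minor, e.g. 6.1 -> 6001 and
  // 10.0 -> 10000; the switch below matches on these values.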
1695  os::free(version_info);
1696
1697  st->print(" Windows ");
1698  switch (os_vers) {
1699
1700  case 6000:
1701    if (is_workstation) {
1702      st->print("Vista");
1703    } else {
1704      st->print("Server 2008");
1705    }
1706    break;
1707
1708  case 6001:
1709    if (is_workstation) {
1710      st->print("7");
1711    } else {
1712      st->print("Server 2008 R2");
1713    }
1714    break;
1715
1716  case 6002:
1717    if (is_workstation) {
1718      st->print("8");
1719    } else {
1720      st->print("Server 2012");
1721    }
1722    break;
1723
1724  case 6003:
1725    if (is_workstation) {
1726      st->print("8.1");
1727    } else {
1728      st->print("Server 2012 R2");
1729    }
1730    break;
1731
1732  case 10000:
1733    if (is_workstation) {
1734      st->print("10");
1735    } else {
1736      st->print("Server 2016");
1737    }
1738    break;
1739
1740  default:
1741    // Unrecognized Windows version; print out its major and minor versions
1742    st->print("%d.%d", major_version, minor_version);
1743    break;
1744  }
1745
1746  // Retrieve SYSTEM_INFO from a GetNativeSystemInfo call so that we can
1747  // find out whether we are running on a 64-bit processor or not
1748  SYSTEM_INFO si;
1749  ZeroMemory(&si, sizeof(SYSTEM_INFO));
1750  GetNativeSystemInfo(&si);
1751  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1752    st->print(" , 64 bit");
1753  }
1754
1755  st->print(" Build %d", build_number);
1756  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1757  st->cr();
1758}
1759
1760void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1761  // Nothing to do for now.
1762}
1763
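// Summarize the CPU by reading the processor brand string from the registry;
// if the registry lookup fails, fall back to the generic "## __CPU__" placeholder.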
1764void os::get_summary_cpu_info(char* buf, size_t buflen) {
1765  HKEY key;
1766  DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1767               "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1768  if (status == ERROR_SUCCESS) {
1769    DWORD size = (DWORD)buflen;
1770    status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1771    if (status != ERROR_SUCCESS) {
1772        strncpy(buf, "## __CPU__", buflen);
1773    }
1774    RegCloseKey(key);
1775  } else {
1776    // Fall back to generic CPU info
1777    strncpy(buf, "## __CPU__", buflen);
1778  }
1779}
1780
1781void os::print_memory_info(outputStream* st) {
1782  st->print("Memory:");
1783  st->print(" %dk page", os::vm_page_size()>>10);
1784
1785  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
1786  // value if total memory is larger than 4GB
1787  MEMORYSTATUSEX ms;
1788  ms.dwLength = sizeof(ms);
1789  GlobalMemoryStatusEx(&ms);
1790
1791  st->print(", physical " UINT64_FORMAT "k", os::physical_memory() >> 10);
1792  st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10);
1793
1794  st->print(", swap " UINT64_FORMAT "k", ms.ullTotalPageFile >> 10);
1795  st->print("(" UINT64_FORMAT "k free)", ms.ullAvailPageFile >> 10);
1796  st->cr();
1797}
1798
1799void os::print_siginfo(outputStream *st, const void* siginfo) {
1800  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1801  st->print("siginfo:");
1802
1803  char tmp[64];
1804  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1805    strcpy(tmp, "EXCEPTION_??");
1806  }
1807  st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1808
1809  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1810       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1811       er->NumberParameters >= 2) {
1812    switch (er->ExceptionInformation[0]) {
1813    case 0: st->print(", reading address"); break;
1814    case 1: st->print(", writing address"); break;
1815    case 8: st->print(", data execution prevention violation at address"); break;
1816    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1817                       er->ExceptionInformation[0]);
1818    }
1819    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1820  } else {
1821    int num = er->NumberParameters;
1822    if (num > 0) {
1823      st->print(", ExceptionInformation=");
1824      for (int i = 0; i < num; i++) {
1825        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1826      }
1827    }
1828  }
1829  st->cr();
1830}
1831
1832void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1833  // do nothing
1834}
1835
1836static char saved_jvm_path[MAX_PATH] = {0};
1837
1838// Find the full path to the current module, jvm.dll
1839void os::jvm_path(char *buf, jint buflen) {
1840  // Error checking.
1841  if (buflen < MAX_PATH) {
1842    assert(false, "must use a large-enough buffer");
1843    buf[0] = '\0';
1844    return;
1845  }
1846  // Lazy resolve the path to current module.
1847  if (saved_jvm_path[0] != 0) {
1848    strcpy(buf, saved_jvm_path);
1849    return;
1850  }
1851
1852  buf[0] = '\0';
1853  if (Arguments::sun_java_launcher_is_altjvm()) {
1854    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1855    // for a JAVA_HOME environment variable and fix up the path so it
1856    // looks like jvm.dll is installed there (append a fake suffix
1857    // hotspot/jvm.dll).
1858    char* java_home_var = ::getenv("JAVA_HOME");
1859    if (java_home_var != NULL && java_home_var[0] != 0 &&
1860        strlen(java_home_var) < (size_t)buflen) {
1861      strncpy(buf, java_home_var, buflen);
1862
1863      // Determine if this is a legacy image or a modules image;
1864      // a modules image doesn't have a "jre" subdirectory
1865      size_t len = strlen(buf);
1866      char* jrebin_p = buf + len;
1867      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1868      if (0 != _access(buf, 0)) {
1869        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1870      }
1871      len = strlen(buf);
1872      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1873    }
1874  }
1875
1876  if (buf[0] == '\0') {
1877    GetModuleFileName(vm_lib_handle, buf, buflen);
1878  }
1879  strncpy(saved_jvm_path, buf, MAX_PATH);
1880  saved_jvm_path[MAX_PATH - 1] = '\0';
1881}
1882
1883
1884void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1885#ifndef _WIN64
1886  st->print("_");
1887#endif
1888}
1889
1890
1891void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1892#ifndef _WIN64
1893  st->print("@%d", args_size  * sizeof(int));
1894#endif
1895}
1896
1897// This method is a copy of JDK's sysGetLastErrorString
1898// from src/windows/hpi/src/system_md.c
1899
1900size_t os::lasterror(char* buf, size_t len) {
1901  DWORD errval;
1902
1903  if ((errval = GetLastError()) != 0) {
1904    // DOS error
1905    size_t n = (size_t)FormatMessage(
1906                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1907                                     NULL,
1908                                     errval,
1909                                     0,
1910                                     buf,
1911                                     (DWORD)len,
1912                                     NULL);
1913    if (n > 3) {
1914      // Drop final '.', CR, LF
1915      if (buf[n - 1] == '\n') n--;
1916      if (buf[n - 1] == '\r') n--;
1917      if (buf[n - 1] == '.') n--;
1918      buf[n] = '\0';
1919    }
1920    return n;
1921  }
1922
1923  if (errno != 0) {
1924    // C runtime error that has no corresponding DOS error code
1925    const char* s = os::strerror(errno);
1926    size_t n = strlen(s);
1927    if (n >= len) n = len - 1;
1928    strncpy(buf, s, n);
1929    buf[n] = '\0';
1930    return n;
1931  }
1932
1933  return 0;
1934}
1935
1936int os::get_last_error() {
1937  DWORD error = GetLastError();
1938  if (error == 0) {
1939    error = errno;
1940  }
1941  return (int)error;
1942}
1943
1944WindowsSemaphore::WindowsSemaphore(uint value) {
1945  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1946
1947  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1948}
1949
1950WindowsSemaphore::~WindowsSemaphore() {
1951  ::CloseHandle(_semaphore);
1952}
1953
1954void WindowsSemaphore::signal(uint count) {
1955  if (count > 0) {
1956    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1957
1958    assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1959  }
1960}
1961
1962void WindowsSemaphore::wait() {
1963  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1964  assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1965  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1966}
1967
1968// sun.misc.Signal
1969// NOTE that this is a workaround for an apparent kernel bug where if
1970// a signal handler for SIGBREAK is installed then that signal handler
1971// takes priority over the console control handler for CTRL_CLOSE_EVENT.
1972// See bug 4416763.
1973static void (*sigbreakHandler)(int) = NULL;
1974
1975static void UserHandler(int sig, void *siginfo, void *context) {
1976  os::signal_notify(sig);
1977  // We need to reinstate the signal handler each time...
1978  os::signal(sig, (void*)UserHandler);
1979}
1980
1981void* os::user_handler() {
1982  return (void*) UserHandler;
1983}
1984
1985void* os::signal(int signal_number, void* handler) {
1986  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1987    void (*oldHandler)(int) = sigbreakHandler;
1988    sigbreakHandler = (void (*)(int)) handler;
1989    return (void*) oldHandler;
1990  } else {
1991    return (void*)::signal(signal_number, (void (*)(int))handler);
1992  }
1993}
1994
1995void os::signal_raise(int signal_number) {
1996  raise(signal_number);
1997}
1998
1999// The Win32 C runtime library maps all console control events other than ^C
2000// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2001// logoff, and shutdown events.  We therefore install our own console handler
2002// that raises SIGTERM for the latter cases.
2003//
2004static BOOL WINAPI consoleHandler(DWORD event) {
2005  switch (event) {
2006  case CTRL_C_EVENT:
2007    if (is_error_reported()) {
2008      // Ctrl-C is pressed during error reporting, likely because the error
2009      // handler fails to abort. Let VM die immediately.
2010      os::die();
2011    }
2012
2013    os::signal_raise(SIGINT);
2014    return TRUE;
2015    break;
2016  case CTRL_BREAK_EVENT:
2017    if (sigbreakHandler != NULL) {
2018      (*sigbreakHandler)(SIGBREAK);
2019    }
2020    return TRUE;
2021    break;
2022  case CTRL_LOGOFF_EVENT: {
2023    // Don't terminate JVM if it is running in a non-interactive session,
2024    // such as a service process.
2025    USEROBJECTFLAGS flags;
2026    HANDLE handle = GetProcessWindowStation();
2027    if (handle != NULL &&
2028        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2029        sizeof(USEROBJECTFLAGS), NULL)) {
2030      // If it is a non-interactive session, let the next handler deal
2031      // with it.
2032      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2033        return FALSE;
2034      }
2035    }
2036  }
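  // Interactive session: deliberately fall through and treat a logoff like a
  // close/shutdown event (raise SIGTERM below).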
2037  case CTRL_CLOSE_EVENT:
2038  case CTRL_SHUTDOWN_EVENT:
2039    os::signal_raise(SIGTERM);
2040    return TRUE;
2041    break;
2042  default:
2043    break;
2044  }
2045  return FALSE;
2046}
2047
2048// The following code was moved from os.cpp to make it platform-specific,
2049// which it is by its very nature.
2050
2051// Return maximum OS signal used + 1 for internal use only
2052// Used as exit signal for signal_thread
2053int os::sigexitnum_pd() {
2054  return NSIG;
2055}
2056
2057// a counter for each possible signal value, including signal_thread exit signal
2058static volatile jint pending_signals[NSIG+1] = { 0 };
2059static HANDLE sig_sem = NULL;
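// sig_sem counts signal notifications posted by os::signal_notify() that have
// not yet been consumed by check_pending_signals() below.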
2060
2061void os::signal_init_pd() {
2062  // Initialize signal structures
2063  memset((void*)pending_signals, 0, sizeof(pending_signals));
2064
2065  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2066
2067  // Programs embedding the VM do not want it to attempt to receive
2068  // events like CTRL_LOGOFF_EVENT, which are used to implement the
2069  // shutdown hooks mechanism introduced in 1.3.  For example, when
2070  // the VM is run as part of a Windows NT service (i.e., a servlet
2071  // engine in a web server), the correct behavior is for any console
2072  // control handler to return FALSE, not TRUE, because the OS's
2073  // "final" handler for such events allows the process to continue if
2074  // it is a service (while terminating it if it is not a service).
2075  // To make this behavior uniform and the mechanism simpler, we
2076  // completely disable the VM's usage of these console events if -Xrs
2077  // (=ReduceSignalUsage) is specified.  This means, for example, that
2078  // the CTRL-BREAK thread dump mechanism is also disabled in this
2079  // case.  See bugs 4323062, 4345157, and related bugs.
2080
2081  if (!ReduceSignalUsage) {
2082    // Add a CTRL-C handler
2083    SetConsoleCtrlHandler(consoleHandler, TRUE);
2084  }
2085}
2086
2087void os::signal_notify(int signal_number) {
2088  BOOL ret;
2089  if (sig_sem != NULL) {
2090    Atomic::inc(&pending_signals[signal_number]);
2091    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2092    assert(ret != 0, "ReleaseSemaphore() failed");
2093  }
2094}
2095
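// Scan the pending_signals counters and atomically claim one via a cmpxchg
// decrement.  If none is pending and wait_for_signal is true, block on sig_sem
// (as a suspend-equivalent state) until os::signal_notify() posts a new one.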
2096static int check_pending_signals(bool wait_for_signal) {
2097  DWORD ret;
2098  while (true) {
2099    for (int i = 0; i < NSIG + 1; i++) {
2100      jint n = pending_signals[i];
2101      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2102        return i;
2103      }
2104    }
2105    if (!wait_for_signal) {
2106      return -1;
2107    }
2108
2109    JavaThread *thread = JavaThread::current();
2110
2111    ThreadBlockInVM tbivm(thread);
2112
2113    bool threadIsSuspended;
2114    do {
2115      thread->set_suspend_equivalent();
2116      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2117      ret = ::WaitForSingleObject(sig_sem, INFINITE);
2118      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2119
2120      // were we externally suspended while we were waiting?
2121      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2122      if (threadIsSuspended) {
2123        // The semaphore has been incremented, but while we were waiting
2124        // another thread suspended us. We don't want to continue running
2125        // while suspended because that would surprise the thread that
2126        // suspended us.
2127        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2128        assert(ret != 0, "ReleaseSemaphore() failed");
2129
2130        thread->java_suspend_self();
2131      }
2132    } while (threadIsSuspended);
2133  }
2134}
2135
2136int os::signal_lookup() {
2137  return check_pending_signals(false);
2138}
2139
2140int os::signal_wait() {
2141  return check_pending_signals(true);
2142}
2143
2144// Implicit OS exception handling
2145
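// Redirect the faulting thread: remember the original faulting pc in the thread
// (saved_exception_pc), rewrite the context's pc to 'handler', and then resume
// execution so the handler runs in place of the faulting instruction.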
2146LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2147                      address handler) {
2148    JavaThread* thread = (JavaThread*) Thread::current_or_null();
2149  // Save pc in thread
2150#ifdef _M_IA64
2151  // Do not blow up if no thread info available.
2152  if (thread) {
2153    // Saving PRECISE pc (with slot information) in thread.
2154    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2155    // Convert precise PC into "Unix" format
2156    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2157    thread->set_saved_exception_pc((address)precise_pc);
2158  }
2159  // Set pc to handler
2160  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2161  // Clear out psr.ri (= Restart Instruction) in order to continue
2162  // at the beginning of the target bundle.
2163  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2164  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2165#else
2166  #ifdef _M_AMD64
2167  // Do not blow up if no thread info available.
2168  if (thread) {
2169    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2170  }
2171  // Set pc to handler
2172  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2173  #else
2174  // Do not blow up if no thread info available.
2175  if (thread) {
2176    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2177  }
2178  // Set pc to handler
2179  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2180  #endif
2181#endif
2182
2183  // Continue the execution
2184  return EXCEPTION_CONTINUE_EXECUTION;
2185}
2186
2187
2188// Used for PostMortemDump
2189extern "C" void safepoints();
2190extern "C" void find(int x);
2191extern "C" void events();
2192
2193// According to Windows API documentation, an illegal instruction sequence should generate
2194// the 0xC000001C exception code. However, real-world experience shows that occasionally
2195// the execution of an illegal instruction can generate the exception code 0xC000001E. This
2196// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2197
2198#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2199
2200// From "Execution Protection in the Windows Operating System" draft 0.35
2201// Once a system header becomes available, the "real" define should be
2202// included or copied here.
2203#define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2204
2205// Handle NAT Bit consumption on IA64.
2206#ifdef _M_IA64
2207  #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2208#endif
2209
2210// Windows Vista/2008 heap corruption check
2211#define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2212
2213// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2214// C++ compiler contain this error code. Because this is a compiler-generated
2215// error, the code is not listed in the Win32 API header files.
2216// The code is actually a cryptic mnemonic device, with the initial "E"
2217// standing for "exception" and the final 3 bytes (0x6D7363) representing the
2218// ASCII values of "msc".
2219
2220#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2221
2222#define def_excpt(val) { #val, (val) }
2223
2224static const struct { const char* name; uint number; } exceptlabels[] = {
2225    def_excpt(EXCEPTION_ACCESS_VIOLATION),
2226    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2227    def_excpt(EXCEPTION_BREAKPOINT),
2228    def_excpt(EXCEPTION_SINGLE_STEP),
2229    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2230    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2231    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2232    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2233    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2234    def_excpt(EXCEPTION_FLT_OVERFLOW),
2235    def_excpt(EXCEPTION_FLT_STACK_CHECK),
2236    def_excpt(EXCEPTION_FLT_UNDERFLOW),
2237    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2238    def_excpt(EXCEPTION_INT_OVERFLOW),
2239    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2240    def_excpt(EXCEPTION_IN_PAGE_ERROR),
2241    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2242    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2243    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2244    def_excpt(EXCEPTION_STACK_OVERFLOW),
2245    def_excpt(EXCEPTION_INVALID_DISPOSITION),
2246    def_excpt(EXCEPTION_GUARD_PAGE),
2247    def_excpt(EXCEPTION_INVALID_HANDLE),
2248    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2249    def_excpt(EXCEPTION_HEAP_CORRUPTION)
2250#ifdef _M_IA64
2251    , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
2252#endif
2253};
2254
2255#undef def_excpt
2256
2257const char* os::exception_name(int exception_code, char *buf, size_t size) {
2258  uint code = static_cast<uint>(exception_code);
2259  for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2260    if (exceptlabels[i].number == code) {
2261      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2262      return buf;
2263    }
2264  }
2265
2266  return NULL;
2267}
2268
2269//-----------------------------------------------------------------------------
2270LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2271  // handle exception caused by idiv; should only happen for -MinInt/-1
2272  // (division by zero is handled explicitly)
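  // On x86, dividing the most negative value by -1 raises the hardware divide
  // error because the quotient does not fit.  Java semantics require the quotient
  // to be that same minimum value with a remainder of 0, so the handler skips
  // over the idiv instruction and patches the result registers accordingly.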
2273#ifdef _M_IA64
2274  assert(0, "Fix Handle_IDiv_Exception");
2275#else
2276  #ifdef  _M_AMD64
2277  PCONTEXT ctx = exceptionInfo->ContextRecord;
2278  address pc = (address)ctx->Rip;
2279  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2280  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2281  if (pc[0] == 0xF7) {
2282    // set correct result values and continue after idiv instruction
2283    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2284  } else {
2285    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2286  }
2287  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
2288  // This is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of the
2289  // idiv opcode (0xF7).
2290  ctx->Rdx = (DWORD)0;             // remainder
2291  // Continue the execution
2292  #else
2293  PCONTEXT ctx = exceptionInfo->ContextRecord;
2294  address pc = (address)ctx->Eip;
2295  assert(pc[0] == 0xF7, "not an idiv opcode");
2296  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2297  assert(ctx->Eax == min_jint, "unexpected idiv exception");
2298  // set correct result values and continue after idiv instruction
2299  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2300  ctx->Eax = (DWORD)min_jint;      // result
2301  ctx->Edx = (DWORD)0;             // remainder
2302  // Continue the execution
2303  #endif
2304#endif
2305  return EXCEPTION_CONTINUE_EXECUTION;
2306}
2307
2308//-----------------------------------------------------------------------------
2309LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2310  PCONTEXT ctx = exceptionInfo->ContextRecord;
2311#ifndef  _WIN64
2312  // handle exception caused by native method modifying control word
2313  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2314
2315  switch (exception_code) {
2316  case EXCEPTION_FLT_DENORMAL_OPERAND:
2317  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2318  case EXCEPTION_FLT_INEXACT_RESULT:
2319  case EXCEPTION_FLT_INVALID_OPERATION:
2320  case EXCEPTION_FLT_OVERFLOW:
2321  case EXCEPTION_FLT_STACK_CHECK:
2322  case EXCEPTION_FLT_UNDERFLOW:
2323    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2324    if (fp_control_word != ctx->FloatSave.ControlWord) {
2325      // Restore FPCW and mask out FLT exceptions
2326      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2327      // Mask out pending FLT exceptions
2328      ctx->FloatSave.StatusWord &=  0xffffff00;
2329      return EXCEPTION_CONTINUE_EXECUTION;
2330    }
2331  }
2332
2333  if (prev_uef_handler != NULL) {
2334    // We didn't handle this exception so pass it to the previous
2335    // UnhandledExceptionFilter.
2336    return (prev_uef_handler)(exceptionInfo);
2337  }
2338#else // !_WIN64
2339  // On Windows, the mxcsr control bits are non-volatile across calls
2340  // See also CR 6192333
2341  //
2342  jint MxCsr = INITIAL_MXCSR;
2343  // we can't use StubRoutines::addr_mxcsr_std()
2344  // because in Win64 mxcsr is not saved there
2345  if (MxCsr != ctx->MxCsr) {
2346    ctx->MxCsr = MxCsr;
2347    return EXCEPTION_CONTINUE_EXECUTION;
2348  }
2349#endif // !_WIN64
2350
2351  return EXCEPTION_CONTINUE_SEARCH;
2352}
2353
2354static inline void report_error(Thread* t, DWORD exception_code,
2355                                address addr, void* siginfo, void* context) {
2356  VMError::report_and_die(t, exception_code, addr, siginfo, context);
2357
2358  // If UseOsErrorReporting, this will return here and save the error file
2359  // somewhere where we can find it in the minidump.
2360}
2361
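// Reconstruct the Java frame that was active when the stack bang trapped, so the
// caller can check it for methods annotated with @ReservedStackAccess (see the
// look_for_reserved_stack_annotated_method call in topLevelExceptionFilter below).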
2362bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2363        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2364  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2365  address addr = (address) exceptionRecord->ExceptionInformation[1];
2366  if (Interpreter::contains(pc)) {
2367    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2368    if (!fr->is_first_java_frame()) {
2369      assert(fr->safe_for_sender(thread), "Safety check");
2370      *fr = fr->java_sender();
2371    }
2372  } else {
2373    // More complex handling is needed for compiled code
2374    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2375    CodeBlob* cb = CodeCache::find_blob(pc);
2376    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2377      // Not sure where the pc points to, fallback to default
2378      // stack overflow handling
2379      return false;
2380    } else {
2381      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2382      // in compiled code, the stack banging is performed just after the return pc
2383      // has been pushed on the stack
2384      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2385      if (!fr->is_java_frame()) {
2386        assert(fr->safe_for_sender(thread), "Safety check");
2387        *fr = fr->java_sender();
2388      }
2389    }
2390  }
2391  assert(fr->is_java_frame(), "Safety check");
2392  return true;
2393}
2394
2395//-----------------------------------------------------------------------------
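// Top-level structured exception filter used by the VM: converts hardware faults
// (access violations, stack overflows, divide and FP traps) into implicit Java
// exceptions or stub continuations where possible, and routes everything else to
// error reporting.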
2396LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2397  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2398  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2399#ifdef _M_IA64
2400  // On Itanium, we need the "precise pc", which has the slot number coded
2401  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2402  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2403  // Convert the pc to "Unix format", which has the slot number coded
2404  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2405  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2406  // information is saved in the Unix format.
2407  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2408#else
2409  #ifdef _M_AMD64
2410  address pc = (address) exceptionInfo->ContextRecord->Rip;
2411  #else
2412  address pc = (address) exceptionInfo->ContextRecord->Eip;
2413  #endif
2414#endif
2415  Thread* t = Thread::current_or_null_safe();
2416
2417  // Handle SafeFetch32 and SafeFetchN exceptions.
2418  if (StubRoutines::is_safefetch_fault(pc)) {
2419    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2420  }
2421
2422#ifndef _WIN64
2423  // Execution protection violation - win32 running on AMD64 only
2424  // Handled first to avoid misdiagnosis as a "normal" access violation;
2425  // This is safe to do because we have a new/unique ExceptionInformation
2426  // code for this condition.
2427  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2428    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2429    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2430    address addr = (address) exceptionRecord->ExceptionInformation[1];
2431
2432    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2433      int page_size = os::vm_page_size();
2434
2435      // Make sure the pc and the faulting address are sane.
2436      //
2437      // If an instruction spans a page boundary, and the page containing
2438      // the beginning of the instruction is executable but the following
2439      // page is not, the pc and the faulting address might be slightly
2440      // different - we still want to unguard the 2nd page in this case.
2441      //
2442      // 15 bytes seems to be a (very) safe value for max instruction size.
2443      bool pc_is_near_addr =
2444        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2445      bool instr_spans_page_boundary =
2446        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2447                         (intptr_t) page_size) > 0);
2448
2449      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2450        static volatile address last_addr =
2451          (address) os::non_memory_address_word();
2452
2453        // In conservative mode, don't unguard unless the address is in the VM
2454        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2455            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2456
2457          // Set memory to RWX and retry
2458          address page_start =
2459            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2460          bool res = os::protect_memory((char*) page_start, page_size,
2461                                        os::MEM_PROT_RWX);
2462
2463          log_debug(os)("Execution protection violation "
2464                        "at " INTPTR_FORMAT
2465                        ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2466                        p2i(page_start), (res ? "success" : os::strerror(errno)));
2467
2468          // Set last_addr so if we fault again at the same address, we don't
2469          // end up in an endless loop.
2470          //
2471          // There are two potential complications here.  Two threads trapping
2472          // at the same address at the same time could cause one of the
2473          // threads to think it already unguarded, and abort the VM.  Likely
2474          // very rare.
2475          //
2476          // The other race involves two threads alternately trapping at
2477          // different addresses and failing to unguard the page, resulting in
2478          // an endless loop.  This condition is probably even more unlikely
2479          // than the first.
2480          //
2481          // Although both cases could be avoided by using locks or thread
2482          // local last_addr, these solutions are unnecessary complication:
2483          // this handler is a best-effort safety net, not a complete solution.
2484          // It is disabled by default and should only be used as a workaround
2485          // in case we missed any no-execute-unsafe VM code.
2486
2487          last_addr = addr;
2488
2489          return EXCEPTION_CONTINUE_EXECUTION;
2490        }
2491      }
2492
2493      // Last unguard failed or not unguarding
2494      tty->print_raw_cr("Execution protection violation");
2495      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2496                   exceptionInfo->ContextRecord);
2497      return EXCEPTION_CONTINUE_SEARCH;
2498    }
2499  }
2500#endif // _WIN64
2501
2502  // Check to see if we caught the safepoint code in the
2503  // process of write protecting the memory serialization page.
2504  // It write enables the page immediately after protecting it
2505  // so just return.
2506  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2507    if (t != NULL && t->is_Java_thread()) {
2508      JavaThread* thread = (JavaThread*) t;
2509      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2510      address addr = (address) exceptionRecord->ExceptionInformation[1];
2511      if (os::is_memory_serialize_page(thread, addr)) {
2512        // Block current thread until the memory serialize page permission restored.
2513        os::block_on_serialize_page_trap();
2514        return EXCEPTION_CONTINUE_EXECUTION;
2515      }
2516    }
2517  }
2518
2519  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2520      VM_Version::is_cpuinfo_segv_addr(pc)) {
2521    // Verify that the OS saves/restores AVX registers.
2522    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2523  }
2524
2525  if (t != NULL && t->is_Java_thread()) {
2526    JavaThread* thread = (JavaThread*) t;
2527    bool in_java = thread->thread_state() == _thread_in_Java;
2528
2529    // Handle potential stack overflows up front.
2530    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2531#ifdef _M_IA64
2532      // Use guard page for register stack.
2533      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2534      address addr = (address) exceptionRecord->ExceptionInformation[1];
2535      // Check for a register stack overflow on Itanium
2536      if (thread->addr_inside_register_stack_red_zone(addr)) {
2537        // Fatal red zone violation happens if the Java program
2538        // catches a StackOverflow error and does so much processing
2539        // that it runs beyond the unprotected yellow guard zone. As
2540        // a result, we are out of here.
2541        fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2542      } else if(thread->addr_inside_register_stack(addr)) {
2543        // Disable the yellow zone which sets the state that
2544        // we've got a stack overflow problem.
2545        if (thread->stack_yellow_reserved_zone_enabled()) {
2546          thread->disable_stack_yellow_reserved_zone();
2547        }
2548        // Give us some room to process the exception.
2549        thread->disable_register_stack_guard();
2550        // Tracing with +Verbose.
2551        if (Verbose) {
2552          tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2553          tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2554          tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2555          tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2556                        thread->register_stack_base(),
2557                        thread->register_stack_base() + thread->stack_size());
2558        }
2559
2560        // Reguard the permanent register stack red zone just to be sure.
2561        // We saw Windows silently disabling this without telling us.
2562        thread->enable_register_stack_red_zone();
2563
2564        return Handle_Exception(exceptionInfo,
2565                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2566      }
2567#endif
2568      if (thread->stack_guards_enabled()) {
2569        if (in_java) {
2570          frame fr;
2571          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2572          address addr = (address) exceptionRecord->ExceptionInformation[1];
2573          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2574            assert(fr.is_java_frame(), "Must be a Java frame");
2575            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2576          }
2577        }
2578        // Yellow zone violation.  The o/s has unprotected the first yellow
2579        // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2580        // update the enabled status, even if the zone contains only one page.
2581        assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2582        thread->disable_stack_yellow_reserved_zone();
2583        // If not in java code, return and hope for the best.
2584        return in_java
2585            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2586            :  EXCEPTION_CONTINUE_EXECUTION;
2587      } else {
2588        // Fatal red zone violation.
2589        thread->disable_stack_red_zone();
2590        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2591        report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2592                      exceptionInfo->ContextRecord);
2593        return EXCEPTION_CONTINUE_SEARCH;
2594      }
2595    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2596      // Either stack overflow or null pointer exception.
2597      if (in_java) {
2598        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2599        address addr = (address) exceptionRecord->ExceptionInformation[1];
2600        address stack_end = thread->stack_end();
2601        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2602          // Stack overflow.
2603          assert(!os::uses_stack_guard_pages(),
2604                 "should be caught by red zone code above.");
2605          return Handle_Exception(exceptionInfo,
2606                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2607        }
2608        // Check for safepoint polling and implicit null checks.
2609        // We only expect null pointers in the stubs (vtable);
2610        // the rest are checked explicitly now.
2611        CodeBlob* cb = CodeCache::find_blob(pc);
2612        if (cb != NULL) {
2613          if (os::is_poll_address(addr)) {
2614            address stub = SharedRuntime::get_poll_stub(pc);
2615            return Handle_Exception(exceptionInfo, stub);
2616          }
2617        }
2618        {
2619#ifdef _WIN64
2620          // If it's a legal stack address, map the entire region in
2621          //
2622          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2623          address addr = (address) exceptionRecord->ExceptionInformation[1];
2624          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2625            addr = (address)((uintptr_t)addr &
2626                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2627            os::commit_memory((char *)addr, thread->stack_base() - addr,
2628                              !ExecMem);
2629            return EXCEPTION_CONTINUE_EXECUTION;
2630          } else
2631#endif
2632          {
2633            // Null pointer exception.
2634#ifdef _M_IA64
2635            // Process implicit null checks in compiled code. Note: Implicit null checks
2636            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2637            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2638              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2639              // Handle implicit null check in UEP method entry
2640              if (cb && (cb->is_frame_complete_at(pc) ||
2641                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2642                if (Verbose) {
2643                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2644                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2645                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2646                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2647                                *(bundle_start + 1), *bundle_start);
2648                }
2649                return Handle_Exception(exceptionInfo,
2650                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2651              }
2652            }
2653
2654            // Implicit null checks were processed above.  Hence, we should not reach
2655            // here in the usual case => die!
2656            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2657            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2658                         exceptionInfo->ContextRecord);
2659            return EXCEPTION_CONTINUE_SEARCH;
2660
2661#else // !IA64
2662
2663            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2664              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2665              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2666            }
2667            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2668                         exceptionInfo->ContextRecord);
2669            return EXCEPTION_CONTINUE_SEARCH;
2670#endif
2671          }
2672        }
2673      }
2674
2675#ifdef _WIN64
2676      // Special care for fast JNI field accessors.
2677      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2678      // in and the heap gets shrunk before the field access.
2679      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2680        address addr = JNI_FastGetField::find_slowcase_pc(pc);
2681        if (addr != (address)-1) {
2682          return Handle_Exception(exceptionInfo, addr);
2683        }
2684      }
2685#endif
2686
2687      // Stack overflow or null pointer exception in native code.
2688      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2689                   exceptionInfo->ContextRecord);
2690      return EXCEPTION_CONTINUE_SEARCH;
2691    } // /EXCEPTION_ACCESS_VIOLATION
2692    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2693#if defined _M_IA64
2694    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2695              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2696      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2697
2698      // Compiled method patched to be non entrant? Following conditions must apply:
2699      // 1. must be first instruction in bundle
2700      // 2. must be a break instruction with appropriate code
2701      if ((((uint64_t) pc & 0x0F) == 0) &&
2702          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2703        return Handle_Exception(exceptionInfo,
2704                                (address)SharedRuntime::get_handle_wrong_method_stub());
2705      }
2706    } // /EXCEPTION_ILLEGAL_INSTRUCTION
2707#endif
2708
2709
2710    if (in_java) {
2711      switch (exception_code) {
2712      case EXCEPTION_INT_DIVIDE_BY_ZERO:
2713        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2714
2715      case EXCEPTION_INT_OVERFLOW:
2716        return Handle_IDiv_Exception(exceptionInfo);
2717
2718      } // switch
2719    }
2720    if (((thread->thread_state() == _thread_in_Java) ||
2721         (thread->thread_state() == _thread_in_native)) &&
2722         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2723      LONG result=Handle_FLT_Exception(exceptionInfo);
2724      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2725    }
2726  }
2727
2728  if (exception_code != EXCEPTION_BREAKPOINT) {
2729    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2730                 exceptionInfo->ContextRecord);
2731  }
2732  return EXCEPTION_CONTINUE_SEARCH;
2733}
2734
2735#ifndef _WIN64
2736// Special care for fast JNI accessors.
2737// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2738// the heap gets shrunk before the field access.
2739// Need to install our own structured exception handler since native code may
2740// install its own.
2741LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2742  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2743  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2744    address pc = (address) exceptionInfo->ContextRecord->Eip;
2745    address addr = JNI_FastGetField::find_slowcase_pc(pc);
2746    if (addr != (address)-1) {
2747      return Handle_Exception(exceptionInfo, addr);
2748    }
2749  }
2750  return EXCEPTION_CONTINUE_SEARCH;
2751}
2752
2753#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2754  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2755                                                     jobject obj,           \
2756                                                     jfieldID fieldID) {    \
2757    __try {                                                                 \
2758      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2759                                                                 obj,       \
2760                                                                 fieldID);  \
2761    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2762                                              _exception_info())) {         \
2763    }                                                                       \
2764    return 0;                                                               \
2765  }
2766
2767DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2768DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2769DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2770DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2771DEFINE_FAST_GETFIELD(jint,     int,    Int)
2772DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2773DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2774DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2775
2776address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2777  switch (type) {
2778  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2779  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2780  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2781  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2782  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2783  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2784  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2785  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2786  default:        ShouldNotReachHere();
2787  }
2788  return (address)-1;
2789}
2790#endif
2791
2792// Virtual Memory
2793
2794int os::vm_page_size() { return os::win32::vm_page_size(); }
2795int os::vm_allocation_granularity() {
2796  return os::win32::vm_allocation_granularity();
2797}
2798
2799// Windows large page support is available on Windows 2003. In order to use
2800// large page memory, the administrator must first assign additional privilege
2801// to the user:
2802//   + select Control Panel -> Administrative Tools -> Local Security Policy
2803//   + select Local Policies -> User Rights Assignment
2804//   + double click "Lock pages in memory", add users and/or groups
2805//   + reboot
2806// Note the above steps are needed for administrator as well, as administrators
2807// by default do not have the privilege to lock pages in memory.
2808//
2809// Note about Windows 2003: although the API supports committing large page
2810// memory on a page-by-page basis and VirtualAlloc() returns success under this
2811// scenario, I found through experimentation that it only uses large pages if the entire
2812// memory region is reserved and committed in a single VirtualAlloc() call.
2813// This makes Windows large page support more or less like Solaris ISM, in
2814// that the entire heap must be committed upfront. This probably will change
2815// in the future, if so the code below needs to be revisited.
2816
2817#ifndef MEM_LARGE_PAGES
2818  #define MEM_LARGE_PAGES 0x20000000
2819#endif
2820
2821static HANDLE    _hProcess;
2822static HANDLE    _hToken;
2823
2824// Container for NUMA node list info
2825class NUMANodeListHolder {
2826 private:
2827  int *_numa_used_node_list;  // allocated below
2828  int _numa_used_node_count;
2829
2830  void free_node_list() {
2831    if (_numa_used_node_list != NULL) {
2832      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2833    }
2834  }
2835
2836 public:
2837  NUMANodeListHolder() {
2838    _numa_used_node_count = 0;
2839    _numa_used_node_list = NULL;
2840    // do rest of initialization in build routine (after function pointers are set up)
2841  }
2842
2843  ~NUMANodeListHolder() {
2844    free_node_list();
2845  }
2846
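  // Determine which NUMA nodes this process can actually run on by intersecting
  // each node's processor mask with the process affinity mask.  Interleaving is
  // only reported as usable when more than one such node exists.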
2847  bool build() {
2848    DWORD_PTR proc_aff_mask;
2849    DWORD_PTR sys_aff_mask;
2850    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2851    ULONG highest_node_number;
2852    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2853    free_node_list();
2854    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2855    for (unsigned int i = 0; i <= highest_node_number; i++) {
2856      ULONGLONG proc_mask_numa_node;
2857      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2858      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2859        _numa_used_node_list[_numa_used_node_count++] = i;
2860      }
2861    }
2862    return (_numa_used_node_count > 1);
2863  }
2864
2865  int get_count() { return _numa_used_node_count; }
2866  int get_node_list_entry(int n) {
2867    // for indexes out of range, returns -1
2868    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2869  }
2870
2871} numa_node_list_holder;
2872
2873
2874
2875static size_t _large_page_size = 0;
2876
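// Committing memory with MEM_LARGE_PAGES requires the "Lock pages in memory"
// privilege (SeLockMemoryPrivilege), so try to enable it on the process token.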
2877static bool request_lock_memory_privilege() {
2878  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2879                          os::current_process_id());
2880
2881  LUID luid;
2882  if (_hProcess != NULL &&
2883      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2884      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2885
2886    TOKEN_PRIVILEGES tp;
2887    tp.PrivilegeCount = 1;
2888    tp.Privileges[0].Luid = luid;
2889    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2890
2891    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2892    // privilege. Check GetLastError() too. See MSDN document.
2893    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2894        (GetLastError() == ERROR_SUCCESS)) {
2895      return true;
2896    }
2897  }
2898
2899  return false;
2900}
2901
2902static void cleanup_after_large_page_init() {
2903  if (_hProcess) CloseHandle(_hProcess);
2904  _hProcess = NULL;
2905  if (_hToken) CloseHandle(_hToken);
2906  _hToken = NULL;
2907}
2908
2909static bool numa_interleaving_init() {
2910  bool success = false;
2911  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2912
2913  // print a warning if the UseNUMAInterleaving flag is specified on the command line
2914  bool warn_on_failure = use_numa_interleaving_specified;
2915#define WARN(msg) if (warn_on_failure) { warning(msg); }
2916
2917  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2918  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2919  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2920
2921  if (numa_node_list_holder.build()) {
2922    if (log_is_enabled(Debug, os, cpu)) {
2923      Log(os, cpu) log;
2924      log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2925      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2926        log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2927      }
2928    }
2929    success = true;
2930  } else {
2931    WARN("Process does not cover multiple NUMA nodes.");
2932  }
2933  if (!success) {
2934    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2935  }
2936  return success;
2937#undef WARN
2938}
2939
2940// This routine is used whenever we need to reserve a contiguous VA range
2941// but must make separate VirtualAlloc calls for each piece of the range.
2942// Reasons for doing this:
2943//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2944//  * UseNUMAInterleaving requires a separate node for each piece
2945static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2946                                         DWORD prot,
2947                                         bool should_inject_error = false) {
2948  char * p_buf;
2949  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2950  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2951  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2952
2953  // First reserve enough address space in advance, since we want to be
2954  // able to break a single contiguous virtual address range into multiple
2955  // large page commits, but WS2003 does not allow reserving large page space,
2956  // so we just use 4K pages for the reserve. This gives us a legal contiguous
2957  // address space. Then we deallocate that reservation and re-allocate it
2958  // using large pages.
2959  const size_t size_of_reserve = bytes + chunk_size;
2960  if (bytes > size_of_reserve) {
2961    // Overflowed.
2962    return NULL;
2963  }
2964  p_buf = (char *) VirtualAlloc(addr,
2965                                size_of_reserve,  // size of Reserve
2966                                MEM_RESERVE,
2967                                PAGE_READWRITE);
2968  // If reservation failed, return NULL
2969  if (p_buf == NULL) return NULL;
2970  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2971  os::release_memory(p_buf, bytes + chunk_size);
2972
2973  // We still need to round up to a page boundary (in case we are using large pages)
2974  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size);
2975  // instead we handle this in the bytes_to_rq computation below.
2976  p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2977
2978  // now go through and allocate one chunk at a time until all bytes are
2979  // allocated
2980  size_t  bytes_remaining = bytes;
2981  // An overflow of align_size_up() would have been caught above
2982  // in the calculation of size_of_reserve.
2983  char * next_alloc_addr = p_buf;
2984  HANDLE hProc = GetCurrentProcess();
2985
2986#ifdef ASSERT
2987  // Variable for the failure injection
2988  long ran_num = os::random();
2989  size_t fail_after = ran_num % bytes;
2990#endif
2991
2992  int count=0;
2993  while (bytes_remaining) {
2994    // select bytes_to_rq to get to the next chunk_size boundary
2995
2996    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2997    // Note allocate and commit
2998    char * p_new;
2999
3000#ifdef ASSERT
3001    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3002#else
3003    const bool inject_error_now = false;
3004#endif
3005
3006    if (inject_error_now) {
3007      p_new = NULL;
3008    } else {
3009      if (!UseNUMAInterleaving) {
3010        p_new = (char *) VirtualAlloc(next_alloc_addr,
3011                                      bytes_to_rq,
3012                                      flags,
3013                                      prot);
3014      } else {
3015        // get the next node to use from the used_node_list
3016        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3017        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3018        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3019      }
3020    }
3021
3022    if (p_new == NULL) {
3023      // Free any allocated pages
3024      if (next_alloc_addr > p_buf) {
3025        // Some memory was committed so release it.
3026        size_t bytes_to_release = bytes - bytes_remaining;
3027        // NMT has yet to record any individual blocks, so it
3028        // needs to create a dummy 'reserve' record to match
3029        // the release.
3030        MemTracker::record_virtual_memory_reserve((address)p_buf,
3031                                                  bytes_to_release, CALLER_PC);
3032        os::release_memory(p_buf, bytes_to_release);
3033      }
3034#ifdef ASSERT
3035      if (should_inject_error) {
3036        log_develop_debug(pagesize)("Reserving pages individually failed.");
3037      }
3038#endif
3039      return NULL;
3040    }
3041
3042    bytes_remaining -= bytes_to_rq;
3043    next_alloc_addr += bytes_to_rq;
3044    count++;
3045  }
3046  // Although the memory is allocated individually, it is returned as one.
3047  // NMT records it as one block.
3048  if ((flags & MEM_COMMIT) != 0) {
3049    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3050  } else {
3051    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3052  }
3053
3054  // made it this far, success
3055  return p_buf;
3056}
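
// A minimal exposition-only sketch of the chunk-boundary arithmetic used by the
// loop above; the helper name is hypothetical and the block is excluded from the
// build via #if 0.
#if 0
static size_t next_chunk_request(const char* next_alloc_addr,
                                 size_t bytes_remaining,
                                 size_t chunk_size) {
  // Clip each request at the next chunk_size boundary so that every
  // VirtualAlloc/VirtualAllocExNuma call covers at most one interleaving chunk.
  size_t to_boundary = chunk_size - ((size_t)next_alloc_addr % chunk_size);
  return bytes_remaining < to_boundary ? bytes_remaining : to_boundary;
}
#endif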
3057
3058
3059
3060void os::large_page_init() {
3061  if (!UseLargePages) return;
3062
3063  // print a warning if any large page related flag is specified on command line
3064  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3065                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3066  bool success = false;
3067
3068#define WARN(msg) if (warn_on_failure) { warning(msg); }
3069  if (request_lock_memory_privilege()) {
3070    size_t s = GetLargePageMinimum();
3071    if (s) {
3072#if defined(IA32) || defined(AMD64)
3073      if (s > 4*M || LargePageSizeInBytes > 4*M) {
3074        WARN("JVM cannot use large pages bigger than 4mb.");
3075      } else {
3076#endif
3077        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3078          _large_page_size = LargePageSizeInBytes;
3079        } else {
3080          _large_page_size = s;
3081        }
3082        success = true;
3083#if defined(IA32) || defined(AMD64)
3084      }
3085#endif
3086    } else {
3087      WARN("Large page is not supported by the processor.");
3088    }
3089  } else {
3090    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3091  }
3092#undef WARN
3093
3094  const size_t default_page_size = (size_t) vm_page_size();
3095  if (success && _large_page_size > default_page_size) {
3096    _page_sizes[0] = _large_page_size;
3097    _page_sizes[1] = default_page_size;
3098    _page_sizes[2] = 0;
3099  }
3100
3101  cleanup_after_large_page_init();
3102  UseLargePages = success;
3103}
3104
3105// On win32, one cannot release just a part of reserved memory; it's an
3106// all or nothing deal.  When we split a reservation, we must break the
3107// reservation into two reservations.
3108void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3109                                  bool realloc) {
3110  if (size > 0) {
3111    release_memory(base, size);
3112    if (realloc) {
3113      reserve_memory(split, base);
3114    }
3115    if (size != split) {
3116      reserve_memory(size - split, base + split);
3117    }
3118  }
3119}
3120
3121// Multiple threads can race in this code, but unlike on POSIX-like OSes it is not
3122// possible to unmap small sections of virtual space to get the requested alignment.
3123// Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3124char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3125  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3126         "Alignment must be a multiple of allocation granularity (page size)");
3127  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3128
3129  size_t extra_size = size + alignment;
3130  assert(extra_size >= size, "overflow, size is too large to allow alignment");
3131
3132  char* aligned_base = NULL;
3133
3134  do {
3135    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3136    if (extra_base == NULL) {
3137      return NULL;
3138    }
3139    // Do manual alignment
3140    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3141
3142    os::release_memory(extra_base, extra_size);
3143
3144    aligned_base = os::reserve_memory(size, aligned_base);
3145
3146  } while (aligned_base == NULL);
3147
3148  return aligned_base;
3149}
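
// A minimal exposition-only sketch of the manual alignment step above
// (equivalent to align_size_up() for a power-of-two alignment); the helper name
// is hypothetical and the block is excluded from the build via #if 0.
#if 0
static char* align_up_sketch(char* extra_base, size_t alignment) {
  // Over-reserving by 'alignment' bytes guarantees an aligned address exists
  // inside the reservation; round the raw base up to it.
  uintptr_t raw = (uintptr_t)extra_base;
  uintptr_t aligned = (raw + alignment - 1) & ~((uintptr_t)alignment - 1);
  return (char*)aligned;
}
#endif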
3150
3151char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3152  assert((size_t)addr % os::vm_allocation_granularity() == 0,
3153         "reserve alignment");
3154  assert(bytes % os::vm_page_size() == 0, "reserve page size");
3155  char* res;
3156  // note that if UseLargePages is on, all the areas that require interleaving
3157  // will go thru reserve_memory_special rather than thru here.
3158  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3159  if (!use_individual) {
3160    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3161  } else {
3162    elapsedTimer reserveTimer;
3163    if (Verbose && PrintMiscellaneous) reserveTimer.start();
3164    // in numa interleaving, we have to allocate pages individually
3165    // (well really chunks of NUMAInterleaveGranularity size)
3166    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3167    if (res == NULL) {
3168      warning("NUMA page allocation failed");
3169    }
3170    if (Verbose && PrintMiscellaneous) {
3171      reserveTimer.stop();
3172      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3173                    reserveTimer.milliseconds(), reserveTimer.ticks());
3174    }
3175  }
3176  assert(res == NULL || addr == NULL || addr == res,
3177         "Unexpected address from reserve.");
3178
3179  return res;
3180}
3181
3182// Reserve memory at an arbitrary address, only if that area is
3183// available (and not reserved for something else).
3184char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3185  // Windows os::reserve_memory() fails if the requested address range is
3186  // not available.
3187  return reserve_memory(bytes, requested_addr);
3188}
3189
3190size_t os::large_page_size() {
3191  return _large_page_size;
3192}
3193
3194bool os::can_commit_large_page_memory() {
3195  // Windows only uses large page memory when the entire region is reserved
3196  // and committed in a single VirtualAlloc() call. This may change in the
3197  // future, but with Windows 2003 it's not possible to commit on demand.
3198  return false;
3199}
3200
3201bool os::can_execute_large_page_memory() {
3202  return true;
3203}
3204
3205char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3206                                 bool exec) {
3207  assert(UseLargePages, "only for large pages");
3208
3209  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3210    return NULL; // Fallback to small pages.
3211  }
3212
3213  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3214  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3215
3216  // with large pages, there are two cases where we need to use Individual Allocation
3217  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3218  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3219  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3220    log_debug(pagesize)("Reserving large pages individually.");
3221
3222    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3223    if (p_buf == NULL) {
3224      // give an appropriate warning message
3225      if (UseNUMAInterleaving) {
3226        warning("NUMA large page allocation failed, UseLargePages flag ignored");
3227      }
3228      if (UseLargePagesIndividualAllocation) {
3229        warning("Individually allocated large pages failed, "
3230                "use -XX:-UseLargePagesIndividualAllocation to turn off");
3231      }
3232      return NULL;
3233    }
3234
3235    return p_buf;
3236
3237  } else {
3238    log_debug(pagesize)("Reserving large pages in a single large chunk.");
3239
3240    // normal policy just allocate it all at once
3241    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3242    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3243    if (res != NULL) {
3244      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3245    }
3246
3247    return res;
3248  }
3249}
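
// A minimal exposition-only sketch of the single-chunk path above: with large
// pages, reserve and commit must happen in one VirtualAlloc call that passes
// MEM_LARGE_PAGES, and the size must be a multiple of GetLargePageMinimum().
// The helper name is hypothetical; the block is excluded from the build via #if 0.
#if 0
static void* reserve_large_pages_sketch(size_t bytes) {
  return VirtualAlloc(NULL, bytes,
                      MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                      PAGE_READWRITE);
}
#endif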
3250
3251bool os::release_memory_special(char* base, size_t bytes) {
3252  assert(base != NULL, "Sanity check");
3253  return release_memory(base, bytes);
3254}
3255
3256void os::print_statistics() {
3257}
3258
3259static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3260  int err = os::get_last_error();
3261  char buf[256];
3262  size_t buf_len = os::lasterror(buf, sizeof(buf));
3263  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3264          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3265          exec, buf_len != 0 ? buf : "<no_error_string>", err);
3266}
3267
3268bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3269  if (bytes == 0) {
3270    // Don't bother the OS with noops.
3271    return true;
3272  }
3273  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3274  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3275  // Don't attempt to print anything if the OS call fails. We're
3276  // probably low on resources, so the print itself may cause crashes.
3277
3278  // Unless we have NUMAInterleaving enabled, the range of a commit
3279  // is always within a reserve covered by a single VirtualAlloc;
3280  // in that case we can just do a single commit for the requested size.
3281  if (!UseNUMAInterleaving) {
3282    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3283      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3284      return false;
3285    }
3286    if (exec) {
3287      DWORD oldprot;
3288      // Windows doc says to use VirtualProtect to get execute permissions
3289      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3290        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3291        return false;
3292      }
3293    }
3294    return true;
3295  } else {
3296
3297    // when NUMAInterleaving is enabled, the commit might cover a range that
3298    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3299    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3300    // returns represents the number of bytes that can be committed in one step.
3301    size_t bytes_remaining = bytes;
3302    char * next_alloc_addr = addr;
3303    while (bytes_remaining > 0) {
3304      MEMORY_BASIC_INFORMATION alloc_info;
3305      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3306      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3307      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3308                       PAGE_READWRITE) == NULL) {
3309        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3310                                            exec);)
3311        return false;
3312      }
3313      if (exec) {
3314        DWORD oldprot;
3315        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3316                            PAGE_EXECUTE_READWRITE, &oldprot)) {
3317          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3318                                              exec);)
3319          return false;
3320        }
3321      }
3322      bytes_remaining -= bytes_to_rq;
3323      next_alloc_addr += bytes_to_rq;
3324    }
3325  }
3326  // if we made it this far, return true
3327  return true;
3328}
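
// A minimal exposition-only sketch of the VirtualQuery-driven walk above:
// RegionSize bounds how much can be committed in one VirtualAlloc call when the
// range spans several individually reserved chunks. The helper name is
// hypothetical; the block is excluded from the build via #if 0.
#if 0
static bool commit_across_reserves_sketch(char* addr, size_t bytes) {
  while (bytes > 0) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery(addr, &info, sizeof(info)) == 0) return false;
    size_t step = bytes < (size_t)info.RegionSize ? bytes : (size_t)info.RegionSize;
    if (VirtualAlloc(addr, step, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
    addr  += step;
    bytes -= step;
  }
  return true;
}
#endif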
3329
3330bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3331                          bool exec) {
3332  // alignment_hint is ignored on this OS
3333  return pd_commit_memory(addr, size, exec);
3334}
3335
3336void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3337                                  const char* mesg) {
3338  assert(mesg != NULL, "mesg must be specified");
3339  if (!pd_commit_memory(addr, size, exec)) {
3340    warn_fail_commit_memory(addr, size, exec);
3341    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3342  }
3343}
3344
3345void os::pd_commit_memory_or_exit(char* addr, size_t size,
3346                                  size_t alignment_hint, bool exec,
3347                                  const char* mesg) {
3348  // alignment_hint is ignored on this OS
3349  pd_commit_memory_or_exit(addr, size, exec, mesg);
3350}
3351
3352bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3353  if (bytes == 0) {
3354    // Don't bother the OS with noops.
3355    return true;
3356  }
3357  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3358  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3359  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3360}
3361
3362bool os::pd_release_memory(char* addr, size_t bytes) {
3363  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3364}
3365
3366bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3367  return os::commit_memory(addr, size, !ExecMem);
3368}
3369
3370bool os::remove_stack_guard_pages(char* addr, size_t size) {
3371  return os::uncommit_memory(addr, size);
3372}
3373
3374static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3375  uint count = 0;
3376  bool ret = false;
3377  size_t bytes_remaining = bytes;
3378  char * next_protect_addr = addr;
3379
3380  // Use VirtualQuery() to get the chunk size.
3381  while (bytes_remaining) {
3382    MEMORY_BASIC_INFORMATION alloc_info;
3383    if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3384      return false;
3385    }
3386
3387    size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3388    // We used a different API in allocate_pages_individually() depending on UseNUMAInterleaving,
3389    // but we don't distinguish here, as both cases are protected by the same API.
3390    ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3391    if (!ret) {
3392      warning("Failed protecting pages individually for chunk #%u", count);
3393      return false;
3394    }
3395
3396    bytes_remaining -= bytes_to_protect;
3397    next_protect_addr += bytes_to_protect;
3398    count++;
3399  }
3400  return ret;
3401}
3402
3403// Set protections specified
3404bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3405                        bool is_committed) {
3406  unsigned int p = 0;
3407  switch (prot) {
3408  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3409  case MEM_PROT_READ: p = PAGE_READONLY; break;
3410  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3411  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3412  default:
3413    ShouldNotReachHere();
3414  }
3415
3416  DWORD old_status;
3417
3418  // Strangely enough, on Win32 one can change protection only for committed
3419  // memory. Not a big deal anyway, as 'bytes' is less than or equal to 64K.
3420  if (!is_committed) {
3421    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3422                          "cannot commit protection page");
3423  }
3424  // One cannot use os::guard_memory() here, as on Win32 guard pages
3425  // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3426  //
3427  // Pages in the region become guard pages. Any attempt to access a guard page
3428  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3429  // the guard page status. Guard pages thus act as a one-time access alarm.
3430  bool ret;
3431  if (UseNUMAInterleaving) {
3432    // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3433    // so we must protect the chunks individually.
3434    ret = protect_pages_individually(addr, bytes, p, &old_status);
3435  } else {
3436    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3437  }
3438#ifdef ASSERT
3439  if (!ret) {
3440    int err = os::get_last_error();
3441    char buf[256];
3442    size_t buf_len = os::lasterror(buf, sizeof(buf));
3443    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3444          ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3445          buf_len != 0 ? buf : "<no_error_string>", err);
3446  }
3447#endif
3448  return ret;
3449}
3450
3451bool os::guard_memory(char* addr, size_t bytes) {
3452  DWORD old_status;
3453  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3454}
3455
3456bool os::unguard_memory(char* addr, size_t bytes) {
3457  DWORD old_status;
3458  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3459}
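
// A minimal exposition-only sketch of the one-shot PAGE_GUARD semantics quoted
// in os::protect_memory() above: the first touch raises
// STATUS_GUARD_PAGE_VIOLATION and clears the guard status, so a second touch
// succeeds. Names are hypothetical; the block is excluded from the build via #if 0.
#if 0
static void guard_page_sketch(char* page, size_t page_size) {
  DWORD old_status;
  VirtualProtect(page, page_size, PAGE_READWRITE | PAGE_GUARD, &old_status);
  __try {
    page[0] = 0;   // first access trips the guard ...
  } __except (GetExceptionCode() == STATUS_GUARD_PAGE_VIOLATION
              ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
    // ... and the guard page status has now been turned off.
  }
  page[0] = 0;     // second access proceeds normally
}
#endif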
3460
3461void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3462void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3463void os::numa_make_global(char *addr, size_t bytes)    { }
3464void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3465bool os::numa_topology_changed()                       { return false; }
3466size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3467int os::numa_get_group_id()                            { return 0; }
3468size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3469  if (numa_node_list_holder.get_count() == 0 && size > 0) {
3470    // Provide an answer for UMA systems
3471    ids[0] = 0;
3472    return 1;
3473  } else {
3474    // check for size bigger than actual groups_num
3475    size = MIN2(size, numa_get_groups_num());
3476    for (int i = 0; i < (int)size; i++) {
3477      ids[i] = numa_node_list_holder.get_node_list_entry(i);
3478    }
3479    return size;
3480  }
3481}
3482
3483bool os::get_page_info(char *start, page_info* info) {
3484  return false;
3485}
3486
3487char *os::scan_pages(char *start, char* end, page_info* page_expected,
3488                     page_info* page_found) {
3489  return end;
3490}
3491
3492char* os::non_memory_address_word() {
3493  // Must never look like an address returned by reserve_memory,
3494  // even in its subfields (as defined by the CPU immediate fields,
3495  // if the CPU splits constants across multiple instructions).
3496  return (char*)-1;
3497}
3498
3499#define MAX_ERROR_COUNT 100
3500#define SYS_THREAD_ERROR 0xffffffffUL
3501
3502void os::pd_start_thread(Thread* thread) {
3503  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3504  // Returns previous suspend state:
3505  // 0:  Thread was not suspended
3506  // 1:  Thread is running now
3507  // >1: Thread is still suspended.
3508  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
3509}
3510
3511class HighResolutionInterval : public CHeapObj<mtThread> {
3512  // The default timer resolution seems to be 10 milliseconds.
3513  // (Where is this written down?)
3514  // If someone wants to sleep for only a fraction of the default,
3515  // then we set the timer resolution down to 1 millisecond for
3516  // the duration of their interval.
3517  // We carefully set the resolution back, since otherwise we
3518  // seem to incur an overhead (3%?) that we don't need.
3519  // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
3520  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3521  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3522  // timeBeginPeriod() if the relative error exceeded some threshold.
3523  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3524  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3525  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3526  // resolution timers running.
3527 private:
3528  jlong resolution;
3529 public:
3530  HighResolutionInterval(jlong ms) {
3531    resolution = ms % 10L;
3532    if (resolution != 0) {
3533      MMRESULT result = timeBeginPeriod(1L);
3534    }
3535  }
3536  ~HighResolutionInterval() {
3537    if (resolution != 0) {
3538      MMRESULT result = timeEndPeriod(1L);
3539    }
3540    resolution = 0L;
3541  }
3542};
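
// A minimal exposition-only sketch of how the class above brackets a sub-10ms
// wait with timeBeginPeriod(1)/timeEndPeriod(1); the helper name is hypothetical
// and the block is excluded from the build via #if 0.
#if 0
static void short_wait_sketch(HANDLE event, DWORD ms) {
  bool raise_resolution = (ms % 10) != 0;   // same test HighResolutionInterval uses
  if (raise_resolution) timeBeginPeriod(1);
  WaitForSingleObject(event, ms);           // timeout now honored at ~1ms granularity
  if (raise_resolution) timeEndPeriod(1);   // restore the default timer resolution
}
#endif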
3543
3544int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3545  jlong limit = (jlong) MAXDWORD;
3546
3547  while (ms > limit) {
3548    int res;
3549    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3550      return res;
3551    }
3552    ms -= limit;
3553  }
3554
3555  assert(thread == Thread::current(), "thread consistency check");
3556  OSThread* osthread = thread->osthread();
3557  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3558  int result;
3559  if (interruptable) {
3560    assert(thread->is_Java_thread(), "must be java thread");
3561    JavaThread *jt = (JavaThread *) thread;
3562    ThreadBlockInVM tbivm(jt);
3563
3564    jt->set_suspend_equivalent();
3565    // cleared by handle_special_suspend_equivalent_condition() or
3566    // java_suspend_self() via check_and_wait_while_suspended()
3567
3568    HANDLE events[1];
3569    events[0] = osthread->interrupt_event();
3570    HighResolutionInterval *phri=NULL;
3571    if (!ForceTimeHighResolution) {
3572      phri = new HighResolutionInterval(ms);
3573    }
3574    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3575      result = OS_TIMEOUT;
3576    } else {
3577      ResetEvent(osthread->interrupt_event());
3578      osthread->set_interrupted(false);
3579      result = OS_INTRPT;
3580    }
3581    delete phri; //if it is NULL, harmless
3582
3583    // were we externally suspended while we were waiting?
3584    jt->check_and_wait_while_suspended();
3585  } else {
3586    assert(!thread->is_Java_thread(), "must not be java thread");
3587    Sleep((long) ms);
3588    result = OS_TIMEOUT;
3589  }
3590  return result;
3591}
3592
3593// Short sleep, direct OS call.
3594//
3595// ms = 0, means allow others (if any) to run.
3596//
3597void os::naked_short_sleep(jlong ms) {
3598  assert(ms < 1000, "Un-interruptable sleep, short time use only");
3599  Sleep(ms);
3600}
3601
3602// Sleep forever; naked call to OS-specific sleep; use with CAUTION
3603void os::infinite_sleep() {
3604  while (true) {    // sleep forever ...
3605    Sleep(100000);  // ... 100 seconds at a time
3606  }
3607}
3608
3609typedef BOOL (WINAPI * STTSignature)(void);
3610
3611void os::naked_yield() {
3612  // Consider passing back the return value from SwitchToThread().
3613  SwitchToThread();
3614}
3615
3616// Win32 only gives you access to seven real priorities at a time,
3617// so we compress Java's ten down to seven.  It would be better
3618// if we dynamically adjusted relative priorities.
3619
3620int os::java_to_os_priority[CriticalPriority + 1] = {
3621  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3622  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3623  THREAD_PRIORITY_LOWEST,                       // 2
3624  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3625  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3626  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3627  THREAD_PRIORITY_NORMAL,                       // 6
3628  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3629  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3630  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3631  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3632  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3633};
3634
3635int prio_policy1[CriticalPriority + 1] = {
3636  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3637  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3638  THREAD_PRIORITY_LOWEST,                       // 2
3639  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3640  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3641  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3642  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3643  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3644  THREAD_PRIORITY_HIGHEST,                      // 8
3645  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3646  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3647  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3648};
3649
3650static int prio_init() {
3651  // If ThreadPriorityPolicy is 1, switch tables
3652  if (ThreadPriorityPolicy == 1) {
3653    int i;
3654    for (i = 0; i < CriticalPriority + 1; i++) {
3655      os::java_to_os_priority[i] = prio_policy1[i];
3656    }
3657  }
3658  if (UseCriticalJavaThreadPriority) {
3659    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3660  }
3661  return 0;
3662}
3663
3664OSReturn os::set_native_priority(Thread* thread, int priority) {
3665  if (!UseThreadPriorities) return OS_OK;
3666  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3667  return ret ? OS_OK : OS_ERR;
3668}
3669
3670OSReturn os::get_native_priority(const Thread* const thread,
3671                                 int* priority_ptr) {
3672  if (!UseThreadPriorities) {
3673    *priority_ptr = java_to_os_priority[NormPriority];
3674    return OS_OK;
3675  }
3676  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3677  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3678    assert(false, "GetThreadPriority failed");
3679    return OS_ERR;
3680  }
3681  *priority_ptr = os_prio;
3682  return OS_OK;
3683}
3684
3685
3686// Hint to the underlying OS that a task switch would not be good.
3687// Void return because it's a hint and can fail.
3688void os::hint_no_preempt() {}
3689
3690void os::interrupt(Thread* thread) {
3691  assert(!thread->is_Java_thread() || Thread::current() == thread ||
3692         Threads_lock->owned_by_self(),
3693         "possibility of dangling Thread pointer");
3694
3695  OSThread* osthread = thread->osthread();
3696  osthread->set_interrupted(true);
3697  // More than one thread can get here with the same value of osthread,
3698  // resulting in multiple notifications.  We do, however, want the store
3699  // to interrupted() to be visible to other threads before we post
3700  // the interrupt event.
3701  OrderAccess::release();
3702  SetEvent(osthread->interrupt_event());
3703  // For JSR166:  unpark after setting status
3704  if (thread->is_Java_thread()) {
3705    ((JavaThread*)thread)->parker()->unpark();
3706  }
3707
3708  ParkEvent * ev = thread->_ParkEvent;
3709  if (ev != NULL) ev->unpark();
3710}
3711
3712
3713bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3714  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3715         "possibility of dangling Thread pointer");
3716
3717  OSThread* osthread = thread->osthread();
3718  // There is no synchronization between the setting of the interrupt
3719  // and it being cleared here. It is critical - see 6535709 - that
3720  // we only clear the interrupt state, and reset the interrupt event,
3721  // if we are going to report that we were indeed interrupted - else
3722  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3723  // depending on the timing. Checking the thread's interrupt event verifies
3724  // that the thread received a real interrupt, which prevents spurious wakeups.
3725  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3726  if (interrupted && clear_interrupted) {
3727    osthread->set_interrupted(false);
3728    ResetEvent(osthread->interrupt_event());
3729  } // Otherwise leave the interrupted state alone
3730
3731  return interrupted;
3732}
3733
3734// Gets a pc (hint) for a running thread. Currently used only for profiling.
3735ExtendedPC os::get_thread_pc(Thread* thread) {
3736  CONTEXT context;
3737  context.ContextFlags = CONTEXT_CONTROL;
3738  HANDLE handle = thread->osthread()->thread_handle();
3739#ifdef _M_IA64
3740  assert(0, "Fix get_thread_pc");
3741  return ExtendedPC(NULL);
3742#else
3743  if (GetThreadContext(handle, &context)) {
3744#ifdef _M_AMD64
3745    return ExtendedPC((address) context.Rip);
3746#else
3747    return ExtendedPC((address) context.Eip);
3748#endif
3749  } else {
3750    return ExtendedPC(NULL);
3751  }
3752#endif
3753}
3754
3755// GetCurrentThreadId() returns DWORD
3756intx os::current_thread_id()  { return GetCurrentThreadId(); }
3757
3758static int _initial_pid = 0;
3759
3760int os::current_process_id() {
3761  return (_initial_pid ? _initial_pid : _getpid());
3762}
3763
3764int    os::win32::_vm_page_size              = 0;
3765int    os::win32::_vm_allocation_granularity = 0;
3766int    os::win32::_processor_type            = 0;
3767// Processor level is not available on non-NT systems, use vm_version instead
3768int    os::win32::_processor_level           = 0;
3769julong os::win32::_physical_memory           = 0;
3770size_t os::win32::_default_stack_size        = 0;
3771
3772intx          os::win32::_os_thread_limit    = 0;
3773volatile intx os::win32::_os_thread_count    = 0;
3774
3775bool   os::win32::_is_windows_server         = false;
3776
3777// 6573254
3778// Currently, the bug is observed across all the supported Windows releases,
3779// including the latest one (as of this writing - Windows Server 2012 R2)
3780bool   os::win32::_has_exit_bug              = true;
3781
3782void os::win32::initialize_system_info() {
3783  SYSTEM_INFO si;
3784  GetSystemInfo(&si);
3785  _vm_page_size    = si.dwPageSize;
3786  _vm_allocation_granularity = si.dwAllocationGranularity;
3787  _processor_type  = si.dwProcessorType;
3788  _processor_level = si.wProcessorLevel;
3789  set_processor_count(si.dwNumberOfProcessors);
3790
3791  MEMORYSTATUSEX ms;
3792  ms.dwLength = sizeof(ms);
3793
3794  // MEMORYSTATUSEX also returns ullAvailPhys (free physical memory bytes), ullTotalVirtual,
3795  // ullAvailVirtual, and dwMemoryLoad (% of memory in use)
3796  GlobalMemoryStatusEx(&ms);
3797  _physical_memory = ms.ullTotalPhys;
3798
3799  if (FLAG_IS_DEFAULT(MaxRAM)) {
3800    // Adjust MaxRAM according to the maximum virtual address space available.
3801    FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3802  }
3803
3804  OSVERSIONINFOEX oi;
3805  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3806  GetVersionEx((OSVERSIONINFO*)&oi);
3807  switch (oi.dwPlatformId) {
3808  case VER_PLATFORM_WIN32_NT:
3809    {
3810      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3811      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3812          oi.wProductType == VER_NT_SERVER) {
3813        _is_windows_server = true;
3814      }
3815    }
3816    break;
3817  default: fatal("Unknown platform");
3818  }
3819
3820  _default_stack_size = os::current_stack_size();
3821  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3822  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3823         "stack size not a multiple of page size");
3824
3825  initialize_performance_counter();
3826}
3827
3828
3829HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3830                                      int ebuflen) {
3831  char path[MAX_PATH];
3832  DWORD size;
3833  DWORD pathLen = (DWORD)sizeof(path);
3834  HINSTANCE result = NULL;
3835
3836  // only allow library name without path component
3837  assert(strchr(name, '\\') == NULL, "path not allowed");
3838  assert(strchr(name, ':') == NULL, "path not allowed");
3839  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3840    jio_snprintf(ebuf, ebuflen,
3841                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3842    return NULL;
3843  }
3844
3845  // search system directory
3846  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3847    if (size >= pathLen) {
3848      return NULL; // truncated
3849    }
3850    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3851      return NULL; // truncated
3852    }
3853    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3854      return result;
3855    }
3856  }
3857
3858  // try Windows directory
3859  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3860    if (size >= pathLen) {
3861      return NULL; // truncated
3862    }
3863    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3864      return NULL; // truncated
3865    }
3866    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3867      return result;
3868    }
3869  }
3870
3871  jio_snprintf(ebuf, ebuflen,
3872               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3873  return NULL;
3874}
3875
3876#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3877#define EXIT_TIMEOUT 300000 /* 5 minutes */
3878
3879static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3880  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3881  return TRUE;
3882}
3883
3884int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3885  // Basic approach:
3886  //  - Each exiting thread registers its intent to exit and then does so.
3887  //  - A thread trying to terminate the process must wait for all
3888  //    threads currently exiting to complete their exit.
3889
3890  if (os::win32::has_exit_bug()) {
3891    // The array holds handles of the threads that have started exiting by calling
3892    // _endthreadex().
3893    // Should be large enough to avoid blocking the exiting thread due to lack of
3894    // a free slot.
3895    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3896    static int handle_count = 0;
3897
3898    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3899    static CRITICAL_SECTION crit_sect;
3900    static volatile jint process_exiting = 0;
3901    int i, j;
3902    DWORD res;
3903    HANDLE hproc, hthr;
3904
3905    // We only attempt to register threads until a process exiting
3906    // thread manages to set the process_exiting flag. Any threads
3907    // that come through here after the process_exiting flag is set
3908    // are unregistered and will be caught in the SuspendThread()
3909    // infinite loop below.
3910    bool registered = false;
3911
3912    // The first thread that reached this point, initializes the critical section.
3913    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3914      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3915    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3916      if (what != EPT_THREAD) {
3917        // Atomically set process_exiting before the critical section
3918        // to increase the visibility between racing threads.
3919        Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
3920      }
3921      EnterCriticalSection(&crit_sect);
3922
3923      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3924        // Remove from the array those handles of the threads that have completed exiting.
3925        for (i = 0, j = 0; i < handle_count; ++i) {
3926          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3927          if (res == WAIT_TIMEOUT) {
3928            handles[j++] = handles[i];
3929          } else {
3930            if (res == WAIT_FAILED) {
3931              warning("WaitForSingleObject failed (%u) in %s: %d\n",
3932                      GetLastError(), __FILE__, __LINE__);
3933            }
3934            // Don't keep the handle, if we failed waiting for it.
3935            CloseHandle(handles[i]);
3936          }
3937        }
3938
3939        // If there's no free slot in the array of the kept handles, we'll have to
3940        // wait until at least one thread completes exiting.
3941        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3942          // Raise the priority of the oldest exiting thread to increase its chances
3943          // to complete sooner.
3944          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3945          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3946          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3947            i = (res - WAIT_OBJECT_0);
3948            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3949            for (; i < handle_count; ++i) {
3950              handles[i] = handles[i + 1];
3951            }
3952          } else {
3953            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3954                    (res == WAIT_FAILED ? "failed" : "timed out"),
3955                    GetLastError(), __FILE__, __LINE__);
3956            // Don't keep handles, if we failed waiting for them.
3957            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3958              CloseHandle(handles[i]);
3959            }
3960            handle_count = 0;
3961          }
3962        }
3963
3964        // Store a duplicate of the current thread handle in the array of handles.
3965        hproc = GetCurrentProcess();
3966        hthr = GetCurrentThread();
3967        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3968                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
3969          warning("DuplicateHandle failed (%u) in %s: %d\n",
3970                  GetLastError(), __FILE__, __LINE__);
3971
3972          // We can't register this thread (no more handles) so this thread
3973          // may be racing with a thread that is calling exit(). If the thread
3974          // that is calling exit() has managed to set the process_exiting
3975          // flag, then this thread will be caught in the SuspendThread()
3976          // infinite loop below which closes that race. A small timing
3977          // window remains before the process_exiting flag is set, but it
3978          // is only exposed when we are out of handles.
3979        } else {
3980          ++handle_count;
3981          registered = true;
3982
3983          // The current exiting thread has stored its handle in the array, and now
3984          // should leave the critical section before calling _endthreadex().
3985        }
3986
3987      } else if (what != EPT_THREAD && handle_count > 0) {
3988        jlong start_time, finish_time, timeout_left;
3989        // Before ending the process, make sure all the threads that had called
3990        // _endthreadex() completed.
3991
3992        // Set the priority level of the current thread to the same value as
3993        // the priority level of exiting threads.
3994        // This is to ensure it will be given a fair chance to execute if
3995        // the timeout expires.
3996        hthr = GetCurrentThread();
3997        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3998        start_time = os::javaTimeNanos();
3999        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
4000        for (i = 0; ; ) {
4001          int portion_count = handle_count - i;
4002          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
4003            portion_count = MAXIMUM_WAIT_OBJECTS;
4004          }
4005          for (j = 0; j < portion_count; ++j) {
4006            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4007          }
4008          timeout_left = (finish_time - start_time) / 1000000L;
4009          if (timeout_left < 0) {
4010            timeout_left = 0;
4011          }
4012          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4013          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4014            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4015                    (res == WAIT_FAILED ? "failed" : "timed out"),
4016                    GetLastError(), __FILE__, __LINE__);
4017            // Reset portion_count so we close the remaining
4018            // handles due to this error.
4019            portion_count = handle_count - i;
4020          }
4021          for (j = 0; j < portion_count; ++j) {
4022            CloseHandle(handles[i + j]);
4023          }
4024          if ((i += portion_count) >= handle_count) {
4025            break;
4026          }
4027          start_time = os::javaTimeNanos();
4028        }
4029        handle_count = 0;
4030      }
4031
4032      LeaveCriticalSection(&crit_sect);
4033    }
4034
4035    if (!registered &&
4036        OrderAccess::load_acquire(&process_exiting) != 0 &&
4037        process_exiting != (jint)GetCurrentThreadId()) {
4038      // Some other thread is about to call exit(), so we don't let
4039      // the current unregistered thread proceed to exit() or _endthreadex()
4040      while (true) {
4041        SuspendThread(GetCurrentThread());
4042        // Avoid busy-wait loop, if SuspendThread() failed.
4043        Sleep(EXIT_TIMEOUT);
4044      }
4045    }
4046  }
4047
4048  // We are here if either
4049  // - there's no 'race at exit' bug on this OS release;
4050  // - initialization of the critical section failed (unlikely);
4051  // - the current thread has registered itself and left the critical section;
4052  // - the process-exiting thread has raised the flag and left the critical section.
4053  if (what == EPT_THREAD) {
4054    _endthreadex((unsigned)exit_code);
4055  } else if (what == EPT_PROCESS) {
4056    ::exit(exit_code);
4057  } else {
4058    _exit(exit_code);
4059  }
4060
4061  // Should not reach here
4062  return exit_code;
4063}
4064
4065#undef EXIT_TIMEOUT
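
// A minimal exposition-only sketch of the registration scheme above: an exiting
// thread parks a duplicated handle to itself, and a thread ending the whole
// process waits on the registered handles before exiting. Names are hypothetical
// and the locking/slot management is omitted; the block is excluded from the
// build via #if 0.
#if 0
static HANDLE exiting_thread_handles[MAXIMUM_WAIT_OBJECTS];
static int    exiting_thread_count = 0;

static void register_exiting_thread_sketch() {
  HANDLE self;
  DuplicateHandle(GetCurrentProcess(), GetCurrentThread(), GetCurrentProcess(),
                  &self, 0, FALSE, DUPLICATE_SAME_ACCESS);
  exiting_thread_handles[exiting_thread_count++] = self;
  // ... the thread then calls _endthreadex(); its duplicated handle becomes
  // signaled once the thread has fully terminated.
}

static void wait_for_exiting_threads_sketch(DWORD timeout_ms) {
  if (exiting_thread_count > 0) {
    WaitForMultipleObjects(exiting_thread_count, exiting_thread_handles,
                           TRUE, timeout_ms);
  }
}
#endif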
4066
4067void os::win32::setmode_streams() {
4068  _setmode(_fileno(stdin), _O_BINARY);
4069  _setmode(_fileno(stdout), _O_BINARY);
4070  _setmode(_fileno(stderr), _O_BINARY);
4071}
4072
4073
4074bool os::is_debugger_attached() {
4075  return IsDebuggerPresent() ? true : false;
4076}
4077
4078
4079void os::wait_for_keypress_at_exit(void) {
4080  if (PauseAtExit) {
4081    fprintf(stderr, "Press any key to continue...\n");
4082    fgetc(stdin);
4083  }
4084}
4085
4086
4087bool os::message_box(const char* title, const char* message) {
4088  int result = MessageBox(NULL, message, title,
4089                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4090  return result == IDYES;
4091}
4092
4093#ifndef PRODUCT
4094#ifndef _WIN64
4095// Helpers to check whether NX protection is enabled
4096int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4097  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4098      pex->ExceptionRecord->NumberParameters > 0 &&
4099      pex->ExceptionRecord->ExceptionInformation[0] ==
4100      EXCEPTION_INFO_EXEC_VIOLATION) {
4101    return EXCEPTION_EXECUTE_HANDLER;
4102  }
4103  return EXCEPTION_CONTINUE_SEARCH;
4104}
4105
4106void nx_check_protection() {
4107  // If NX is enabled we'll get an exception calling into code on the stack
4108  char code[] = { (char)0xC3 }; // ret
4109  void *code_ptr = (void *)code;
4110  __try {
4111    __asm call code_ptr
4112  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4113    tty->print_raw_cr("NX protection detected.");
4114  }
4115}
4116#endif // _WIN64
4117#endif // PRODUCT
4118
4119// This is called _before_ the global arguments have been parsed
4120void os::init(void) {
4121  _initial_pid = _getpid();
4122
4123  init_random(1234567);
4124
4125  win32::initialize_system_info();
4126  win32::setmode_streams();
4127  init_page_sizes((size_t) win32::vm_page_size());
4128
4129  // This may be overridden later when argument processing is done.
4130  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4131
4132  // Initialize main_process and main_thread
4133  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4134  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4135                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4136    fatal("DuplicateHandle failed\n");
4137  }
4138  main_thread_id = (int) GetCurrentThreadId();
4139
4140  // initialize fast thread access - only used for 32-bit
4141  win32::initialize_thread_ptr_offset();
4142}
4143
4144// To install functions for atexit processing
4145extern "C" {
4146  static void perfMemory_exit_helper() {
4147    perfMemory_exit();
4148  }
4149}
4150
4151static jint initSock();
4152
4153// this is called _after_ the global arguments have been parsed
4154jint os::init_2(void) {
4155  // Allocate a single page and mark it as readable for safepoint polling
4156  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4157  guarantee(polling_page != NULL, "Reserve Failed for polling page");
4158
4159  address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4160  guarantee(return_page != NULL, "Commit Failed for polling page");
4161
4162  os::set_polling_page(polling_page);
4163  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4164
4165  if (!UseMembar) {
4166    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4167    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4168
4169    return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4170    guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4171
4172    os::set_memory_serialize_page(mem_serialize_page);
4173    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4174  }
4175
4176  // Setup Windows Exceptions
4177
4178  // for debugging float code generation bugs
4179  if (ForceFloatExceptions) {
4180#ifndef  _WIN64
4181    static long fp_control_word = 0;
4182    __asm { fstcw fp_control_word }
4183    // see Intel PPro Manual, Vol. 2, p 7-16
4184    const long precision = 0x20;
4185    const long underflow = 0x10;
4186    const long overflow  = 0x08;
4187    const long zero_div  = 0x04;
4188    const long denorm    = 0x02;
4189    const long invalid   = 0x01;
4190    fp_control_word |= invalid;
4191    __asm { fldcw fp_control_word }
4192#endif
4193  }
4194
4195  // If stack_commit_size is 0, windows will reserve the default size,
4196  // but only commit a small portion of it.
4197  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4198  size_t default_reserve_size = os::win32::default_stack_size();
4199  size_t actual_reserve_size = stack_commit_size;
4200  if (stack_commit_size < default_reserve_size) {
4201    // If stack_commit_size == 0, we want this too
4202    actual_reserve_size = default_reserve_size;
4203  }
4204
4205  // Check minimum allowable stack size for thread creation and to initialize
4206  // the java system classes, including StackOverflowError - depends on page
4207  // size.  Add two 4K pages for compiler2 recursion in main thread.
4208  // Add in 4*BytesPerWord 4K pages to account for VM stack during
4209  // class initialization depending on 32 or 64 bit VM.
4210  size_t min_stack_allowed =
4211            (size_t)(JavaThread::stack_guard_zone_size() +
4212                     JavaThread::stack_shadow_zone_size() +
4213                     (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4214
4215  min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
4216
4217  if (actual_reserve_size < min_stack_allowed) {
4218    tty->print_cr("\nThe stack size specified is too small, "
4219                  "Specify at least %dk",
4220                  min_stack_allowed / K);
4221    return JNI_ERR;
4222  }
4223
4224  JavaThread::set_stack_size_at_create(stack_commit_size);
4225
4226  // Calculate the theoretical max. number of threads to guard against artificial
4227  // out-of-memory situations, where all available address-space has been
4228  // reserved by thread stacks.
4229  assert(actual_reserve_size != 0, "Must have a stack");
4230
4231  // Calculate the thread limit when we should start doing Virtual Memory
4232  // banging. Currently this is when the threads have used all but 200Mb of space.
4233  //
4234  // TODO: consider performing a similar calculation for commit size instead
4235  // as reserve size, since on a 64-bit platform we'll run into that more
4236  // often than running out of virtual memory space.  We can use the
4237  // lower value of the two calculations as the os_thread_limit.
4238  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4239  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4240
4241  // at exit methods are called in the reverse order of their registration.
4242  // there is no limit to the number of functions registered. atexit does
4243  // not set errno.
4244
4245  if (PerfAllowAtExitRegistration) {
4246    // only register atexit functions if PerfAllowAtExitRegistration is set.
4247    // atexit functions can be delayed until process exit time, which
4248    // can be problematic for embedded VM situations. Embedded VMs should
4249    // call DestroyJavaVM() to assure that VM resources are released.
4250
4251    // note: perfMemory_exit_helper atexit function may be removed in
4252    // the future if the appropriate cleanup code can be added to the
4253    // VM_Exit VMOperation's doit method.
4254    if (atexit(perfMemory_exit_helper) != 0) {
4255      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4256    }
4257  }
4258
4259#ifndef _WIN64
4260  // Print something if NX is enabled (win32 on AMD64)
4261  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4262#endif
4263
4264  // initialize thread priority policy
4265  prio_init();
4266
4267  if (UseNUMA && !ForceNUMA) {
4268    UseNUMA = false; // We don't fully support this yet
4269  }
4270
4271  if (UseNUMAInterleaving) {
4272    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4273    bool success = numa_interleaving_init();
4274    if (!success) UseNUMAInterleaving = false;
4275  }
4276
4277  if (initSock() != JNI_OK) {
4278    return JNI_ERR;
4279  }
4280
4281  return JNI_OK;
4282}
4283
4284// Mark the polling page as unreadable
4285void os::make_polling_page_unreadable(void) {
4286  DWORD old_status;
4287  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4288                      PAGE_NOACCESS, &old_status)) {
4289    fatal("Could not disable polling page");
4290  }
4291}
4292
4293// Mark the polling page as readable
4294void os::make_polling_page_readable(void) {
4295  DWORD old_status;
4296  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4297                      PAGE_READONLY, &old_status)) {
4298    fatal("Could not enable polling page");
4299  }
4300}
4301
4302
4303int os::stat(const char *path, struct stat *sbuf) {
4304  char pathbuf[MAX_PATH];
4305  if (strlen(path) > MAX_PATH - 1) {
4306    errno = ENAMETOOLONG;
4307    return -1;
4308  }
4309  os::native_path(strcpy(pathbuf, path));
4310  int ret = ::stat(pathbuf, sbuf);
4311  if (sbuf != NULL && UseUTCFileTimestamp) {
4312    // Fix for 6539723.  st_mtime returned from stat() is dependent on
4313    // the system timezone and so can return different values for the
4314    // same file if/when daylight savings time changes.  This adjustment
4315    // makes sure the same timestamp is returned regardless of the TZ.
4316    //
4317    // See:
4318    // http://msdn.microsoft.com/library/
4319    //   default.asp?url=/library/en-us/sysinfo/base/
4320    //   time_zone_information_str.asp
4321    // and
4322    // http://msdn.microsoft.com/library/default.asp?url=
4323    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4324    //
4325    // NOTE: there is an insidious bug here:  If the timezone is changed
4326    // after the call to stat() but before 'GetTimeZoneInformation()', then
4327    // the adjustment we do here will be wrong and we'll return the wrong
4328    // value (which will likely end up creating an invalid class data
4329    // archive).  Absent a better API for this, or some time zone locking
4330    // mechanism, we'll have to live with this risk.
4331    TIME_ZONE_INFORMATION tz;
4332    DWORD tzid = GetTimeZoneInformation(&tz);
4333    int daylightBias =
4334      (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4335    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4336  }
4337  return ret;
4338}
4339
4340
4341#define FT2INT64(ft) \
4342  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4343
4344
4345// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4346// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4347// of a thread.
4348//
4349// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4350// the fast estimate available on the platform.
4351
4352// current_thread_cpu_time() is not optimized for Windows yet
4353jlong os::current_thread_cpu_time() {
4354  // return user + sys since the cost is the same
4355  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4356}
4357
4358jlong os::thread_cpu_time(Thread* thread) {
4359  // consistent with what current_thread_cpu_time() returns.
4360  return os::thread_cpu_time(thread, true /* user+sys */);
4361}
4362
4363jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4364  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4365}
4366
4367jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4368  // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4369  // If this function changes, os::is_thread_cpu_time_supported() should too
4370  FILETIME CreationTime;
4371  FILETIME ExitTime;
4372  FILETIME KernelTime;
4373  FILETIME UserTime;
4374
4375  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4376                      &ExitTime, &KernelTime, &UserTime) == 0) {
4377    return -1;
4378  } else if (user_sys_cpu_time) {
4379    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4380  } else {
4381    return FT2INT64(UserTime) * 100;
4382  }
4383}
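
// An exposition-only sketch of the FILETIME arithmetic above: GetThreadTimes()
// reports 100-nanosecond units, so FT2INT64(t) * 100 yields nanoseconds; the
// block is excluded from the build via #if 0.
#if 0
static jlong filetime_to_nanos_sketch(const FILETIME& ft) {
  jlong hundred_ns = ((jlong)ft.dwHighDateTime << 32) | (julong)ft.dwLowDateTime;
  return hundred_ns * 100;   // 100ns units -> nanoseconds
}
#endif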
4384
4385void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4386  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4387  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4388  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4389  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4390}
4391
4392void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4393  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4394  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4395  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4396  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4397}
4398
4399bool os::is_thread_cpu_time_supported() {
4400  // see os::thread_cpu_time
4401  FILETIME CreationTime;
4402  FILETIME ExitTime;
4403  FILETIME KernelTime;
4404  FILETIME UserTime;
4405
4406  if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4407                      &KernelTime, &UserTime) == 0) {
4408    return false;
4409  } else {
4410    return true;
4411  }
4412}
4413
4414// Windows doesn't provide a loadavg primitive, so this is stubbed out for now.
4415// It does have primitives (PDH API) to get CPU usage and run queue length.
4416// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4417// If we wanted to implement loadavg on Windows, we have a few options:
4418//
4419// a) Query CPU usage and run queue length and "fake" an answer by
4420//    returning the CPU usage if it's under 100%, and the run queue
4421//    length otherwise.  It turns out that querying is pretty slow
4422//    on Windows, on the order of 200 microseconds on a fast machine.
4423//    Note that on Windows the CPU usage value is the % usage
4424//    since the last time the API was called (and the first call
4425//    returns 100%), so we'd have to deal with that as well.
4426//
4427// b) Sample the "fake" answer using a sampling thread and store
4428//    the answer in a global variable.  The call to loadavg would
4429//    just return the value of the global, avoiding the slow query.
4430//
4431// c) Sample a better answer using exponential decay to smooth the
4432//    value.  This is basically the algorithm used by UNIX kernels.
4433//
4434// Note that sampling thread starvation could affect both (b) and (c).
4435int os::loadavg(double loadavg[], int nelem) {
4436  return -1;
4437}
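
// A minimal sketch of option (a) above, deliberately kept out of the build.
// It assumes pdh.lib is linked and uses the PDH counter paths quoted in the
// comment; the helper name query_cpu_and_queue() is illustrative only.
#if 0
#include <pdh.h>

static bool query_cpu_and_queue(double* cpu_pct, double* queue_len) {
  PDH_HQUERY query = NULL;
  PDH_HCOUNTER cpu = NULL;
  PDH_HCOUNTER queue = NULL;
  if (PdhOpenQuery(NULL, 0, &query) != ERROR_SUCCESS) {
    return false;
  }
  PdhAddCounter(query, "\\Processor(_Total)\\% Processor Time", 0, &cpu);
  PdhAddCounter(query, "\\System\\Processor Queue Length", 0, &queue);
  PdhCollectQueryData(query);   // first sample only establishes a baseline
  Sleep(100);                   // wait so "% Processor Time" has an interval
  PdhCollectQueryData(query);   // second sample makes the CPU value meaningful
  PDH_FMT_COUNTERVALUE value;
  bool ok = PdhGetFormattedCounterValue(cpu, PDH_FMT_DOUBLE, NULL, &value) == ERROR_SUCCESS;
  if (ok) *cpu_pct = value.doubleValue;
  ok = ok && PdhGetFormattedCounterValue(queue, PDH_FMT_DOUBLE, NULL, &value) == ERROR_SUCCESS;
  if (ok) *queue_len = value.doubleValue;
  PdhCloseQuery(query);
  return ok;
}
#endif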
4438
4439
4440// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4441bool os::dont_yield() {
4442  return DontYieldALot;
4443}
4444
4445// This method is a slightly reworked copy of JDK's sysOpen
4446// from src/windows/hpi/src/sys_api_md.c
4447
4448int os::open(const char *path, int oflag, int mode) {
4449  char pathbuf[MAX_PATH];
4450
4451  if (strlen(path) > MAX_PATH - 1) {
4452    errno = ENAMETOOLONG;
4453    return -1;
4454  }
4455  os::native_path(strcpy(pathbuf, path));
4456  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4457}
4458
4459FILE* os::open(int fd, const char* mode) {
4460  return ::_fdopen(fd, mode);
4461}
4462
4463// Is a (classpath) directory empty?
4464bool os::dir_is_empty(const char* path) {
4465  WIN32_FIND_DATA fd;
4466  HANDLE f = FindFirstFile(path, &fd);
4467  if (f == INVALID_HANDLE_VALUE) {
4468    return true;
4469  }
4470  FindClose(f);
4471  return false;
4472}
4473
4474// create binary file, rewriting existing file if required
4475int os::create_binary_file(const char* path, bool rewrite_existing) {
4476  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4477  if (!rewrite_existing) {
4478    oflags |= _O_EXCL;
4479  }
4480  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4481}
4482
4483// return current position of file pointer
4484jlong os::current_file_offset(int fd) {
4485  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4486}
4487
4488// move file pointer to the specified offset
4489jlong os::seek_to_file_offset(int fd, jlong offset) {
4490  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4491}
4492
4493
4494jlong os::lseek(int fd, jlong offset, int whence) {
4495  return (jlong) ::_lseeki64(fd, offset, whence);
4496}
4497
4498size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4499  OVERLAPPED ov;
4500  DWORD nread;
4501  BOOL result;
4502
4503  ZeroMemory(&ov, sizeof(ov));
4504  ov.Offset = (DWORD)offset;
4505  ov.OffsetHigh = (DWORD)(offset >> 32);
4506
4507  HANDLE h = (HANDLE)::_get_osfhandle(fd);
4508
4509  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4510
4511  return result ? nread : 0;
4512}
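
// Usage sketch (illustrative): read an 8-byte header at absolute offset 0 of
// an already opened file descriptor; n is the number of bytes actually read,
// or 0 if ReadFile() failed.
//   char header[8];
//   size_t n = os::read_at(fd, header, sizeof(header), 0);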
4513
4514
4515// This method is a slightly reworked copy of JDK's sysNativePath
4516// from src/windows/hpi/src/path_md.c
4517
4518// Convert a pathname to native format.  On win32, this involves forcing all
4519// separators to be '\\' rather than '/' (both are legal inputs, but Win95
4520// sometimes rejects '/') and removing redundant separators.  The input path is
4521// assumed to have been converted into the character encoding used by the local
4522// system.  Because this might be a double-byte encoding, care is taken to
4523// treat double-byte lead characters correctly.
4524//
4525// This procedure modifies the given path in place, as the result is never
4526// longer than the original.  There is no error return; this operation always
4527// succeeds.
4528char * os::native_path(char *path) {
4529  char *src = path, *dst = path, *end = path;
4530  char *colon = NULL;  // If a drive specifier is found, this will
4531                       // point to the colon following the drive letter
4532
4533  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4534  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4535          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4536
4537  // Check for leading separators
4538#define isfilesep(c) ((c) == '/' || (c) == '\\')
4539  while (isfilesep(*src)) {
4540    src++;
4541  }
4542
4543  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4544    // Remove leading separators if followed by drive specifier.  This
4545    // hack is necessary to support file URLs containing drive
4546    // specifiers (e.g., "file://c:/path").  As a side effect,
4547    // "/c:/path" can be used as an alternative to "c:/path".
4548    *dst++ = *src++;
4549    colon = dst;
4550    *dst++ = ':';
4551    src++;
4552  } else {
4553    src = path;
4554    if (isfilesep(src[0]) && isfilesep(src[1])) {
4555      // UNC pathname: Retain first separator; leave src pointed at
4556      // second separator so that further separators will be collapsed
4557      // into the second separator.  The result will be a pathname
4558      // beginning with "\\\\" followed (most likely) by a host name.
4559      src = dst = path + 1;
4560      path[0] = '\\';     // Force first separator to '\\'
4561    }
4562  }
4563
4564  end = dst;
4565
4566  // Remove redundant separators from remainder of path, forcing all
4567  // separators to be '\\' rather than '/'. Also, single byte space
4568  // characters are removed from the end of the path because those
4569  // are not legal ending characters on this operating system.
4570  //
4571  while (*src != '\0') {
4572    if (isfilesep(*src)) {
4573      *dst++ = '\\'; src++;
4574      while (isfilesep(*src)) src++;
4575      if (*src == '\0') {
4576        // Check for trailing separator
4577        end = dst;
4578        if (colon == dst - 2) break;  // "z:\\"
4579        if (dst == path + 1) break;   // "\\"
4580        if (dst == path + 2 && isfilesep(path[0])) {
4581          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4582          // beginning of a UNC pathname.  Even though it is not, by
4583          // itself, a valid UNC pathname, we leave it as is in order
4584          // to be consistent with the path canonicalizer as well
4585          // as the win32 APIs, which treat this case as an invalid
4586          // UNC pathname rather than as an alias for the root
4587          // directory of the current drive.
4588          break;
4589        }
4590        end = --dst;  // Path does not denote a root directory, so
4591                      // remove trailing separator
4592        break;
4593      }
4594      end = dst;
4595    } else {
4596      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4597        *dst++ = *src++;
4598        if (*src) *dst++ = *src++;
4599        end = dst;
4600      } else {  // Copy a single-byte character
4601        char c = *src++;
4602        *dst++ = c;
4603        // Space is not a legal ending character
4604        if (c != ' ') end = dst;
4605      }
4606    }
4607  }
4608
4609  *end = '\0';
4610
4611  // For "z:", add "." to work around a bug in the C runtime library
4612  if (colon == dst - 1) {
4613    path[2] = '.';
4614    path[3] = '\0';
4615  }
4616
4617  return path;
4618}
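
// Illustrative inputs and results for os::native_path() (assuming a
// single-byte locale):
//   "/c:/path"        -> "c:\path"        (leading separator dropped before a drive)
//   "c://foo//bar/"   -> "c:\foo\bar"     (separators collapsed, trailing one removed)
//   "//server/share/" -> "\\server\share" (UNC prefix preserved)
//   "z:"              -> "z:."            (CRT workaround described above)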
4619
4620// This code is a copy of JDK's sysSetLength
4621// from src/windows/hpi/src/sys_api_md.c
4622
4623int os::ftruncate(int fd, jlong length) {
4624  HANDLE h = (HANDLE)::_get_osfhandle(fd);
4625  long high = (long)(length >> 32);
4626  DWORD ret;
4627
4628  if (h == (HANDLE)(-1)) {
4629    return -1;
4630  }
4631
4632  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4633  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4634    return -1;
4635  }
4636
4637  if (::SetEndOfFile(h) == FALSE) {
4638    return -1;
4639  }
4640
4641  return 0;
4642}
4643
4644int os::get_fileno(FILE* fp) {
4645  return _fileno(fp);
4646}
4647
4648// This code is a copy of JDK's sysSync
4649// from src/windows/hpi/src/sys_api_md.c
4650// except for the legacy workaround for a bug in Win 98
4651
4652int os::fsync(int fd) {
4653  HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4654
4655  if ((!::FlushFileBuffers(handle)) &&
4656      (GetLastError() != ERROR_ACCESS_DENIED)) {
4657    // from winerror.h
4658    return -1;
4659  }
4660  return 0;
4661}
4662
4663static int nonSeekAvailable(int, long *);
4664static int stdinAvailable(int, long *);
4665
4666#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4667#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4668
4669// This code is a copy of JDK's sysAvailable
4670// from src/windows/hpi/src/sys_api_md.c
4671
4672int os::available(int fd, jlong *bytes) {
4673  jlong cur, end;
4674  struct _stati64 stbuf64;
4675
4676  if (::_fstati64(fd, &stbuf64) >= 0) {
4677    int mode = stbuf64.st_mode;
4678    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4679      int ret;
4680      long lpbytes;
4681      if (fd == 0) {
4682        ret = stdinAvailable(fd, &lpbytes);
4683      } else {
4684        ret = nonSeekAvailable(fd, &lpbytes);
4685      }
4686      (*bytes) = (jlong)(lpbytes);
4687      return ret;
4688    }
4689    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4690      return FALSE;
4691    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4692      return FALSE;
4693    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4694      return FALSE;
4695    }
4696    *bytes = end - cur;
4697    return TRUE;
4698  } else {
4699    return FALSE;
4700  }
4701}
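
// Worked example (illustrative): for a seekable file of 100 bytes whose file
// pointer is currently at offset 40, the three _lseeki64() calls above leave
// the pointer at 40 and report *bytes == 60.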
4702
4703void os::flockfile(FILE* fp) {
4704  _lock_file(fp);
4705}
4706
4707void os::funlockfile(FILE* fp) {
4708  _unlock_file(fp);
4709}
4710
4711// This code is a copy of JDK's nonSeekAvailable
4712// from src/windows/hpi/src/sys_api_md.c
4713
4714static int nonSeekAvailable(int fd, long *pbytes) {
4715  // This is used for available on non-seekable devices
4716  // (like both named and anonymous pipes, such as pipes
4717  //  connected to an exec'd process).
4718  // Standard Input is a special case.
4719  HANDLE han;
4720
4721  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4722    return FALSE;
4723  }
4724
4725  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4726    // PeekNamedPipe fails when at EOF.  In that case we
4727    // simply make *pbytes = 0 which is consistent with the
4728    // behavior we get on Solaris when an fd is at EOF.
4729    // The only alternative is to raise an Exception,
4730    // which isn't really warranted.
4731    //
4732    if (::GetLastError() != ERROR_BROKEN_PIPE) {
4733      return FALSE;
4734    }
4735    *pbytes = 0;
4736  }
4737  return TRUE;
4738}
4739
4740#define MAX_INPUT_EVENTS 2000
4741
4742// This code is a copy of JDK's stdinAvailable
4743// from src/windows/hpi/src/sys_api_md.c
4744
4745static int stdinAvailable(int fd, long *pbytes) {
4746  HANDLE han;
4747  DWORD numEventsRead = 0;  // Number of events read from buffer
4748  DWORD numEvents = 0;      // Number of events in buffer
4749  DWORD i = 0;              // Loop index
4750  DWORD curLength = 0;      // Position marker
4751  DWORD actualLength = 0;   // Number of bytes readable
4752  BOOL error = FALSE;       // Error holder
4753  INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4754
4755  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4756    return FALSE;
4757  }
4758
4759  // Construct an array of input records in the console buffer
4760  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4761  if (error == 0) {
4762    return nonSeekAvailable(fd, pbytes);
4763  }
4764
4765  // lpBuffer must fit into 64K or else PeekConsoleInput fails
4766  if (numEvents > MAX_INPUT_EVENTS) {
4767    numEvents = MAX_INPUT_EVENTS;
4768  }
4769
4770  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4771  if (lpBuffer == NULL) {
4772    return FALSE;
4773  }
4774
4775  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4776  if (error == 0) {
4777    os::free(lpBuffer);
4778    return FALSE;
4779  }
4780
4781  // Examine input records for the number of bytes available
4782  for (i=0; i<numEvents; i++) {
4783    if (lpBuffer[i].EventType == KEY_EVENT) {
4784
4785      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4786                                      &(lpBuffer[i].Event);
4787      if (keyRecord->bKeyDown == TRUE) {
4788        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4789        curLength++;
4790        if (*keyPressed == '\r') {
4791          actualLength = curLength;
4792        }
4793      }
4794    }
4795  }
4796
4797  if (lpBuffer != NULL) {
4798    os::free(lpBuffer);
4799  }
4800
4801  *pbytes = (long) actualLength;
4802  return TRUE;
4803}
4804
4805// Map a block of memory.
4806char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4807                        char *addr, size_t bytes, bool read_only,
4808                        bool allow_exec) {
4809  HANDLE hFile;
4810  char* base;
4811
4812  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4813                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4814  if (hFile == INVALID_HANDLE_VALUE) {
4815    log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4816    return NULL;
4817  }
4818
4819  if (allow_exec) {
4820    // CreateFileMapping/MapViewOfFileEx can't map executable memory
4821    // unless it comes from a PE image (which the shared archive is not.)
4822    // Even VirtualProtect refuses to give execute access to mapped memory
4823    // that was not previously executable.
4824    //
4825    // Instead, stick the executable region in anonymous memory.  Yuck.
4826    // Penalty is that ~4 pages will not be shareable - in the future
4827    // we might consider DLLizing the shared archive with a proper PE
4828    // header so that mapping executable + sharing is possible.
4829
4830    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4831                                PAGE_READWRITE);
4832    if (base == NULL) {
4833      log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4834      CloseHandle(hFile);
4835      return NULL;
4836    }
4837
4838    DWORD bytes_read;
4839    OVERLAPPED overlapped;
4840    overlapped.Offset = (DWORD)file_offset;
4841    overlapped.OffsetHigh = 0;
4842    overlapped.hEvent = NULL;
4843    // ReadFile guarantees that if the return value is true, the requested
4844    // number of bytes were read before returning.
4845    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4846    if (!res) {
4847      log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4848      release_memory(base, bytes);
4849      CloseHandle(hFile);
4850      return NULL;
4851    }
4852  } else {
4853    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4854                                    NULL /* file_name */);
4855    if (hMap == NULL) {
4856      log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4857      CloseHandle(hFile);
4858      return NULL;
4859    }
4860
4861    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4862    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4863                                  (DWORD)bytes, addr);
4864    if (base == NULL) {
4865      log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4866      CloseHandle(hMap);
4867      CloseHandle(hFile);
4868      return NULL;
4869    }
4870
4871    if (CloseHandle(hMap) == 0) {
4872      log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4873      CloseHandle(hFile);
4874      return base;
4875    }
4876  }
4877
4878  if (allow_exec) {
4879    DWORD old_protect;
4880    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4881    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4882
4883    if (!res) {
4884      log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4885      // Don't consider this a hard error, on IA32 even if the
4886      // VirtualProtect fails, we should still be able to execute
4887      CloseHandle(hFile);
4888      return base;
4889    }
4890  }
4891
4892  if (CloseHandle(hFile) == 0) {
4893    log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4894    return base;
4895  }
4896
4897  return base;
4898}
4899
4900
4901// Remap a block of memory.
4902char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4903                          char *addr, size_t bytes, bool read_only,
4904                          bool allow_exec) {
4905  // This OS does not allow existing memory maps to be remapped so we
4906  // have to unmap the memory before we remap it.
4907  if (!os::unmap_memory(addr, bytes)) {
4908    return NULL;
4909  }
4910
4911  // There is a very small theoretical window between the unmap_memory()
4912  // call above and the map_memory() call below where a thread in native
4913  // code may be able to access an address that is no longer mapped.
4914
4915  return os::map_memory(fd, file_name, file_offset, addr, bytes,
4916                        read_only, allow_exec);
4917}
4918
4919
4920// Unmap a block of memory.
4921// Returns true=success, otherwise false.
4922
4923bool os::pd_unmap_memory(char* addr, size_t bytes) {
4924  MEMORY_BASIC_INFORMATION mem_info;
4925  if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4926    log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4927    return false;
4928  }
4929
4930  // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4931// Instead, the executable region was allocated using VirtualAlloc(). See
4932  // pd_map_memory() above.
4933  //
4934// The following flags should match the 'exec_access' flags used for
4935  // VirtualProtect() in pd_map_memory().
4936  if (mem_info.Protect == PAGE_EXECUTE_READ ||
4937      mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4938    return pd_release_memory(addr, bytes);
4939  }
4940
4941  BOOL result = UnmapViewOfFile(addr);
4942  if (result == 0) {
4943    log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4944    return false;
4945  }
4946  return true;
4947}
4948
4949void os::pause() {
4950  char filename[MAX_PATH];
4951  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4952    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4953  } else {
4954    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4955  }
4956
4957  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4958  if (fd != -1) {
4959    struct stat buf;
4960    ::close(fd);
4961    while (::stat(filename, &buf) == 0) {
4962      Sleep(100);
4963    }
4964  } else {
4965    jio_fprintf(stderr,
4966                "Could not open pause file '%s', continuing immediately.\n", filename);
4967  }
4968}
4969
4970os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4971  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4972}
4973
4974// See the caveats for this class in os_windows.hpp
4975// Protects the callback call so that a raised OS EXCEPTION causes a jump back
4976// into this method, which then returns false. If no OS EXCEPTION was raised,
4977// it returns true.
4978// The callback is supposed to provide the method that should be protected.
4979//
4980bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4981  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4982  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4983         "crash_protection already set?");
4984
4985  bool success = true;
4986  __try {
4987    WatcherThread::watcher_thread()->set_crash_protection(this);
4988    cb.call();
4989  } __except(EXCEPTION_EXECUTE_HANDLER) {
4990    // only for protection, nothing to do
4991    success = false;
4992  }
4993  WatcherThread::watcher_thread()->set_crash_protection(NULL);
4994  return success;
4995}
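
// Usage sketch (illustrative, WatcherThread only per the asserts above):
// SampleTask is a hypothetical callback derived from os::CrashProtectionCallback,
// the abstract type taken by call() above.
//   class SampleTask : public os::CrashProtectionCallback {
//    public:
//     virtual void call() { /* touch memory that may disappear underneath us */ }
//   };
//   SampleTask task;
//   os::WatcherThreadCrashProtection crash_protection;
//   if (!crash_protection.call(task)) {
//     // an OS exception was raised and swallowed; the task did not complete
//   }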
4996
4997// An Event wraps a win32 "CreateEvent" kernel handle.
4998//
4999// We have a number of choices regarding "CreateEvent" win32 handle leakage:
5000//
5001// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5002//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5003//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5004//     In addition, an unpark() operation might fetch the handle field, but the
5005//     event could recycle between the fetch and the SetEvent() operation.
5006//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5007//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5008//     on a stale but recycled handle would be harmless, but in practice this might
5009//     confuse other non-Sun code, so it's not a viable approach.
5010//
5011// 2:  Once a win32 event handle is associated with an Event, it remains associated
5012//     with the Event.  The event handle is never closed.  This could be construed
5013//     as handle leakage, but only up to the maximum # of threads that have been extant
5014//     at any one time.  This shouldn't be an issue, as Windows platforms typically
5015//     permit a process to have hundreds of thousands of open handles.
5016//
5017// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5018//     and release unused handles.
5019//
5020// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5021//     It's not clear, however, that we wouldn't be trading one type of leak for another.
5022//
5023// 5.  Use an RCU-like mechanism (Read-Copy Update).
5024//     Or perhaps something similar to Maged Michael's "Hazard pointers".
5025//
5026// We use (2).
5027//
5028// TODO-FIXME:
5029// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5030// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5031//     to recover from (or at least detect) the dreaded Windows 841176 bug.
5032// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
5033//     into a single win32 CreateEvent() handle.
5034//
5035// Assumption:
5036//    Only one parker can exist on an event, which is why we allocate
5037//    them per-thread. Multiple unparkers can coexist.
5038//
5039// _Event transitions in park()
5040//   -1 => -1 : illegal
5041//    1 =>  0 : pass - return immediately
5042//    0 => -1 : block; then set _Event to 0 before returning
5043//
5044// _Event transitions in unpark()
5045//    0 => 1 : just return
5046//    1 => 1 : just return
5047//   -1 => either 0 or 1; must signal target thread
5048//         That is, we can safely transition _Event from -1 to either
5049//         0 or 1.
5050//
5051// _Event serves as a restricted-range semaphore.
5052//   -1 : thread is blocked, i.e. there is a waiter
5053//    0 : neutral: thread is running or ready,
5054//        could have been signaled after a wait started
5055//    1 : signaled - thread is running or ready
5056//
5057// Another possible encoding of _Event would be with
5058// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5059//
5060
5061int os::PlatformEvent::park(jlong Millis) {
5062  // Transitions for _Event:
5063  //   -1 => -1 : illegal
5064  //    1 =>  0 : pass - return immediately
5065  //    0 => -1 : block; then set _Event to 0 before returning
5066
5067  guarantee(_ParkHandle != NULL , "Invariant");
5068  guarantee(Millis > 0          , "Invariant");
5069
5070  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5071  // the initial park() operation.
5072  // Consider: use atomic decrement instead of CAS-loop
5073
5074  int v;
5075  for (;;) {
5076    v = _Event;
5077    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5078  }
5079  guarantee((v == 0) || (v == 1), "invariant");
5080  if (v != 0) return OS_OK;
5081
5082  // Do this the hard way by blocking ...
5083  // TODO: consider a brief spin here, gated on the success of recent
5084  // spin attempts by this thread.
5085  //
5086  // We decompose long timeouts into series of shorter timed waits.
5087  // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5088  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5089  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5090  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5091  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5092  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5093  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5094  // for the already waited time.  This policy does not admit any new outcomes.
5095  // In the future, however, we might want to track the accumulated wait time and
5096  // adjust Millis accordingly if we encounter a spurious wakeup.
5097
5098  const int MAXTIMEOUT = 0x10000000;
5099  DWORD rv = WAIT_TIMEOUT;
5100  while (_Event < 0 && Millis > 0) {
5101    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5102    if (Millis > MAXTIMEOUT) {
5103      prd = MAXTIMEOUT;
5104    }
5105    rv = ::WaitForSingleObject(_ParkHandle, prd);
5106    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5107    if (rv == WAIT_TIMEOUT) {
5108      Millis -= prd;
5109    }
5110  }
5111  v = _Event;
5112  _Event = 0;
5113  // see comment at end of os::PlatformEvent::park() below:
5114  OrderAccess::fence();
5115  // If we encounter a nearly simultaneous timeout expiry and unpark()
5116  // we return OS_OK indicating we awoke via unpark().
5117  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5118  return (v >= 0) ? OS_OK : OS_TIMEOUT;
5119}
5120
5121void os::PlatformEvent::park() {
5122  // Transitions for _Event:
5123  //   -1 => -1 : illegal
5124  //    1 =>  0 : pass - return immediately
5125  //    0 => -1 : block; then set _Event to 0 before returning
5126
5127  guarantee(_ParkHandle != NULL, "Invariant");
5128  // Invariant: Only the thread associated with the Event/PlatformEvent
5129  // may call park().
5130  // Consider: use atomic decrement instead of CAS-loop
5131  int v;
5132  for (;;) {
5133    v = _Event;
5134    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5135  }
5136  guarantee((v == 0) || (v == 1), "invariant");
5137  if (v != 0) return;
5138
5139  // Do this the hard way by blocking ...
5140  // TODO: consider a brief spin here, gated on the success of recent
5141  // spin attempts by this thread.
5142  while (_Event < 0) {
5143    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5144    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5145  }
5146
5147  // Usually we'll find _Event == 0 at this point, but as
5148  // an optional optimization we clear it, just in case
5149  // multiple unpark() operations drove _Event up to 1.
5150  _Event = 0;
5151  OrderAccess::fence();
5152  guarantee(_Event >= 0, "invariant");
5153}
5154
5155void os::PlatformEvent::unpark() {
5156  guarantee(_ParkHandle != NULL, "Invariant");
5157
5158  // Transitions for _Event:
5159  //    0 => 1 : just return
5160  //    1 => 1 : just return
5161  //   -1 => either 0 or 1; must signal target thread
5162  //         That is, we can safely transition _Event from -1 to either
5163  //         0 or 1.
5164  // See also: "Semaphores in Plan 9" by Mullender & Cox
5165  //
5166  // Note: Forcing a transition from "-1" to "1" on an unpark() means
5167  // that it will take two back-to-back park() calls for the owning
5168  // thread to block. This has the benefit of forcing a spurious return
5169  // from the first park() call after an unpark() call which will help
5170  // shake out uses of park() and unpark() without condition variables.
5171
5172  if (Atomic::xchg(1, &_Event) >= 0) return;
5173
5174  ::SetEvent(_ParkHandle);
5175}
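
// Worked example of the _Event protocol (illustrative):
//   1. _Event == 0 and thread T calls park(): the CAS moves 0 -> -1 and T
//      blocks on _ParkHandle.
//   2. Another thread calls unpark(): xchg(1) returns -1, so SetEvent() wakes T.
//   3. T returns from WaitForSingleObject(), stores _Event = 0, and returns OS_OK.
//   4. unpark() with no waiter: xchg(1) returns 0, so no SetEvent() is issued;
//      the next park() sees 1, moves it to 0, and returns immediately.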
5176
5177
5178// JSR166
5179// -------------------------------------------------------
5180
5181// The Windows implementation of Park is very straightforward: Basic
5182// operations on Win32 Events turn out to have the right semantics to
5183// use them directly. We opportunistically reuse the event inherited
5184// from Monitor.
5185
5186void Parker::park(bool isAbsolute, jlong time) {
5187  guarantee(_ParkEvent != NULL, "invariant");
5188  // First, demultiplex/decode time arguments
5189  if (time < 0) { // don't wait
5190    return;
5191  } else if (time == 0 && !isAbsolute) {
5192    time = INFINITE;
5193  } else if (isAbsolute) {
5194    time -= os::javaTimeMillis(); // convert to relative time
5195    if (time <= 0) {  // already elapsed
5196      return;
5197    }
5198  } else { // relative
5199    time /= 1000000;  // Must coarsen from nanos to millis
5200    if (time == 0) {  // Wait for the minimal time unit if zero
5201      time = 1;
5202    }
5203  }
5204
5205  JavaThread* thread = JavaThread::current();
5206
5207  // Don't wait if interrupted or already triggered
5208  if (Thread::is_interrupted(thread, false) ||
5209      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5210    ResetEvent(_ParkEvent);
5211    return;
5212  } else {
5213    ThreadBlockInVM tbivm(thread);
5214    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5215    thread->set_suspend_equivalent();
5216
5217    WaitForSingleObject(_ParkEvent, time);
5218    ResetEvent(_ParkEvent);
5219
5220    // If externally suspended while waiting, re-suspend
5221    if (thread->handle_special_suspend_equivalent_condition()) {
5222      thread->java_suspend_self();
5223    }
5224  }
5225}
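
// Worked example of the time decoding above (illustrative): a relative timeout
// of 500000 ns coarsens to 500000 / 1000000 == 0 ms and is then bumped to the
// minimal 1 ms wait; an absolute deadline of os::javaTimeMillis() + 250 becomes
// a relative wait of roughly 250 ms.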
5226
5227void Parker::unpark() {
5228  guarantee(_ParkEvent != NULL, "invariant");
5229  SetEvent(_ParkEvent);
5230}
5231
5232// Run the specified command in a separate process. Return its exit value,
5233// or -1 on failure (e.g. can't create a new process).
5234int os::fork_and_exec(char* cmd) {
5235  STARTUPINFO si;
5236  PROCESS_INFORMATION pi;
5237
5238  memset(&si, 0, sizeof(si));
5239  si.cb = sizeof(si);
5240  memset(&pi, 0, sizeof(pi));
5241  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5242                            cmd,    // command line
5243                            NULL,   // process security attribute
5244                            NULL,   // thread security attribute
5245                            TRUE,   // inherits system handles
5246                            0,      // no creation flags
5247                            NULL,   // use parent's environment block
5248                            NULL,   // use parent's starting directory
5249                            &si,    // (in) startup information
5250                            &pi);   // (out) process information
5251
5252  if (rslt) {
5253    // Wait until child process exits.
5254    WaitForSingleObject(pi.hProcess, INFINITE);
5255
5256    DWORD exit_code;
5257    GetExitCodeProcess(pi.hProcess, &exit_code);
5258
5259    // Close process and thread handles.
5260    CloseHandle(pi.hProcess);
5261    CloseHandle(pi.hThread);
5262
5263    return (int)exit_code;
5264  } else {
5265    return -1;
5266  }
5267}
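
// Usage sketch (illustrative; the command string below is an example only):
//   int status = os::fork_and_exec((char*)"cmd.exe /c exit 3");
//   // status == 3 if the child ran; -1 if CreateProcess() failed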
5268
5269bool os::find(address addr, outputStream* st) {
5270  int offset = -1;
5271  bool result = false;
5272  char buf[256];
5273  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5274    st->print(PTR_FORMAT " ", addr);
5275    if (strlen(buf) < sizeof(buf) - 1) {
5276      char* p = strrchr(buf, '\\');
5277      if (p) {
5278        st->print("%s", p + 1);
5279      } else {
5280        st->print("%s", buf);
5281      }
5282    } else {
5283        // The library name is probably truncated. Let's omit the library name.
5284        // See also JDK-8147512.
5285    }
5286    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5287      st->print("::%s + 0x%x", buf, offset);
5288    }
5289    st->cr();
5290    result = true;
5291  }
5292  return result;
5293}
5294
5295LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5296  DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5297
5298  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5299    JavaThread* thread = JavaThread::current();
5300    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5301    address addr = (address) exceptionRecord->ExceptionInformation[1];
5302
5303    if (os::is_memory_serialize_page(thread, addr)) {
5304      return EXCEPTION_CONTINUE_EXECUTION;
5305    }
5306  }
5307
5308  return EXCEPTION_CONTINUE_SEARCH;
5309}
5310
5311// We don't build a headless jre for Windows
5312bool os::is_headless_jre() { return false; }
5313
5314static jint initSock() {
5315  WSADATA wsadata;
5316
5317  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5318    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5319                ::GetLastError());
5320    return JNI_ERR;
5321  }
5322  return JNI_OK;
5323}
5324
5325struct hostent* os::get_host_by_name(char* name) {
5326  return (struct hostent*)gethostbyname(name);
5327}
5328
5329int os::socket_close(int fd) {
5330  return ::closesocket(fd);
5331}
5332
5333int os::socket(int domain, int type, int protocol) {
5334  return ::socket(domain, type, protocol);
5335}
5336
5337int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5338  return ::connect(fd, him, len);
5339}
5340
5341int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5342  return ::recv(fd, buf, (int)nBytes, flags);
5343}
5344
5345int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5346  return ::send(fd, buf, (int)nBytes, flags);
5347}
5348
5349int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5350  return ::send(fd, buf, (int)nBytes, flags);
5351}
5352
5353// WINDOWS CONTEXT Flags for THREAD_SAMPLING
5354#if defined(IA32)
5355  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5356#elif defined (AMD64)
5357  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5358#endif
5359
5360// returns true if thread could be suspended,
5361// false otherwise
5362static bool do_suspend(HANDLE* h) {
5363  if (h != NULL) {
5364    if (SuspendThread(*h) != ~0) {
5365      return true;
5366    }
5367  }
5368  return false;
5369}
5370
5371// resume the thread
5372// calling resume on an active thread is a no-op
5373static void do_resume(HANDLE* h) {
5374  if (h != NULL) {
5375    ResumeThread(*h);
5376  }
5377}
5378
5379// retrieve a suspend/resume context capable handle
5380// from the tid. Caller validates handle return value.
5381void get_thread_handle_for_extended_context(HANDLE* h,
5382                                            OSThread::thread_id_t tid) {
5383  if (h != NULL) {
5384    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5385  }
5386}
5387
5388// Thread sampling implementation
5389//
5390void os::SuspendedThreadTask::internal_do_task() {
5391  CONTEXT    ctxt;
5392  HANDLE     h = NULL;
5393
5394  // get context capable handle for thread
5395  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5396
5397  // sanity
5398  if (h == NULL || h == INVALID_HANDLE_VALUE) {
5399    return;
5400  }
5401
5402  // suspend the thread
5403  if (do_suspend(&h)) {
5404    ctxt.ContextFlags = sampling_context_flags;
5405    // get thread context
5406    GetThreadContext(h, &ctxt);
5407    SuspendedThreadTaskContext context(_thread, &ctxt);
5408    // pass context to Thread Sampling impl
5409    do_task(context);
5410    // resume thread
5411    do_resume(&h);
5412  }
5413
5414  // close handle
5415  CloseHandle(h);
5416}
5417
5418bool os::start_debugging(char *buf, int buflen) {
5419  int len = (int)strlen(buf);
5420  char *p = &buf[len];
5421
5422  jio_snprintf(p, buflen-len,
5423             "\n\n"
5424             "Do you want to debug the problem?\n\n"
5425             "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5426             "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5427             "Otherwise, select 'No' to abort...",
5428             os::current_process_id(), os::current_thread_id());
5429
5430  bool yes = os::message_box("Unexpected Error", buf);
5431
5432  if (yes) {
5433    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5434    // exception. If VM is running inside a debugger, the debugger will
5435    // catch the exception. Otherwise, the breakpoint exception will reach
5436    // the default windows exception handler, which can spawn a debugger and
5437    // automatically attach to the dying VM.
5438    os::breakpoint();
5439    yes = false;
5440  }
5441  return yes;
5442}
5443
5444void* os::get_default_process_handle() {
5445  return (void*)GetModuleHandle(NULL);
5446}
5447
5448// Builds a platform dependent Agent_OnLoad_<lib_name> function name
5449// which is used to find statically linked in agents.
5450// Additionally for Windows, takes into account __stdcall names.
5451// Parameters:
5452//            sym_name: Symbol in library we are looking for
5453//            lib_name: Name of library to look in, NULL for shared libs.
5454//            is_absolute_path == true if lib_name is absolute path to agent
5455//                                     such as "C:/a/b/L.dll"
5456//            == false if only the base name of the library is passed in
5457//               such as "L"
5458char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5459                                    bool is_absolute_path) {
5460  char *agent_entry_name;
5461  size_t len;
5462  size_t name_len;
5463  size_t prefix_len = strlen(JNI_LIB_PREFIX);
5464  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5465  const char *start;
5466
5467  if (lib_name != NULL) {
5468    len = name_len = strlen(lib_name);
5469    if (is_absolute_path) {
5470      // Need to strip path, prefix and suffix
5471      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5472        lib_name = ++start;
5473      } else {
5474        // Need to check for drive prefix
5475        if ((start = strchr(lib_name, ':')) != NULL) {
5476          lib_name = ++start;
5477        }
5478      }
5479      if (len <= (prefix_len + suffix_len)) {
5480        return NULL;
5481      }
5482      lib_name += prefix_len;
5483      name_len = strlen(lib_name) - suffix_len;
5484    }
5485  }
5486  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5487  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5488  if (agent_entry_name == NULL) {
5489    return NULL;
5490  }
5491  if (lib_name != NULL) {
5492    const char *p = strrchr(sym_name, '@');
5493    if (p != NULL && p != sym_name) {
5494      // sym_name == _Agent_OnLoad@XX
5495      strncpy(agent_entry_name, sym_name, (p - sym_name));
5496      agent_entry_name[(p-sym_name)] = '\0';
5497      // agent_entry_name == _Agent_OnLoad
5498      strcat(agent_entry_name, "_");
5499      strncat(agent_entry_name, lib_name, name_len);
5500      strcat(agent_entry_name, p);
5501      // agent_entry_name == _Agent_OnLoad_lib_name@XX
5502    } else {
5503      strcpy(agent_entry_name, sym_name);
5504      strcat(agent_entry_name, "_");
5505      strncat(agent_entry_name, lib_name, name_len);
5506    }
5507  } else {
5508    strcpy(agent_entry_name, sym_name);
5509  }
5510  return agent_entry_name;
5511}
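
// Worked examples (illustrative):
//   sym_name "_Agent_OnLoad@16", lib_name "L" (is_absolute_path == false)
//     -> "_Agent_OnLoad_L@16"
//   sym_name "_Agent_OnLoad@16", lib_name "C:\a\b\L.dll" (is_absolute_path == true)
//     -> "_Agent_OnLoad_L@16"
//   sym_name "Agent_OnAttach", lib_name NULL
//     -> "Agent_OnAttach"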
5512
5513#ifndef PRODUCT
5514
5515// test the code path in reserve_memory_special() that tries to allocate memory in a single
5516// contiguous memory block at a particular address.
5517// The test first tries to find a good approximate address to allocate at by using the same
5518// method to allocate some memory at any address. The test then tries to allocate memory in
5519// the vicinity (not directly after it, to avoid possible by-chance use of that location).
5520// This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5521// the previously allocated memory is available for allocation. The only actual failure
5522// that is reported is when the test tries to allocate at a particular location but gets a
5523// different valid one. A NULL return value at this point is not considered an error but may
5524// be legitimate.
5525// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5526void TestReserveMemorySpecial_test() {
5527  if (!UseLargePages) {
5528    if (VerboseInternalVMTests) {
5529      tty->print("Skipping test because large pages are disabled");
5530    }
5531    return;
5532  }
5533  // save current value of globals
5534  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5535  bool old_use_numa_interleaving = UseNUMAInterleaving;
5536
5537  // set globals to make sure we hit the correct code path
5538  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5539
5540  // do an allocation at an address selected by the OS to get a good one.
5541  const size_t large_allocation_size = os::large_page_size() * 4;
5542  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5543  if (result == NULL) {
5544    if (VerboseInternalVMTests) {
5545      tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5546                          large_allocation_size);
5547    }
5548  } else {
5549    os::release_memory_special(result, large_allocation_size);
5550
5551    // allocate another page within the recently allocated memory area which seems to be a good location. At least
5552    // we managed to get it once.
5553    const size_t expected_allocation_size = os::large_page_size();
5554    char* expected_location = result + os::large_page_size();
5555    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5556    if (actual_location == NULL) {
5557      if (VerboseInternalVMTests) {
5558        tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5559                            expected_location, expected_allocation_size);
5560      }
5561    } else {
5562      // release memory
5563      os::release_memory_special(actual_location, expected_allocation_size);
5564      // only now check, after releasing any memory to avoid any leaks.
5565      assert(actual_location == expected_location,
5566             "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5567             expected_location, expected_allocation_size, actual_location);
5568    }
5569  }
5570
5571  // restore globals
5572  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5573  UseNUMAInterleaving = old_use_numa_interleaving;
5574}
5575#endif // PRODUCT
5576
5577/*
5578  All the defined signal names for Windows.
5579
5580  NOTE that not all of these names are accepted by FindSignal!
5581
5582  For various reasons some of these may be rejected at runtime.
5583
5584  Here are the names currently accepted by a user of sun.misc.Signal with
5585  1.4.1 (ignoring potential interaction with use of chaining, etc):
5586
5587     (LIST TBD)
5588
5589*/
5590int os::get_signal_number(const char* name) {
5591  static const struct {
5592    char* name;
5593    int   number;
5594  } siglabels [] =
5595    // derived from version 6.0 VC98/include/signal.h
5596  {"ABRT",      SIGABRT,        // abnormal termination triggered by abort call
5597  "FPE",        SIGFPE,         // floating point exception
5598  "SEGV",       SIGSEGV,        // segment violation
5599  "INT",        SIGINT,         // interrupt
5600  "TERM",       SIGTERM,        // software term signal from kill
5601  "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5602  "ILL",        SIGILL};        // illegal instruction
5603  for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5604    if (strcmp(name, siglabels[i].name) == 0) {
5605      return siglabels[i].number;
5606    }
5607  }
5608  return -1;
5609}
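
// Usage sketch (illustrative): os::get_signal_number("TERM") returns SIGTERM,
// while a name not in the table above, such as "HUP", returns -1.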
5610
5611// Fast current thread access
5612
5613int os::win32::_thread_ptr_offset = 0;
5614
5615static void call_wrapper_dummy() {}
5616
5617// We need to call the os_exception_wrapper once so that it sets
5618// up the offset from FS of the thread pointer.
5619void os::win32::initialize_thread_ptr_offset() {
5620  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5621                           NULL, NULL, NULL, NULL);
5622}
5623