os_windows.cpp revision 11658:8a5735c11a84
1/*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
26#define _WIN32_WINNT 0x0600
27
28// no precompiled headers
29#include "classfile/classLoader.hpp"
30#include "classfile/systemDictionary.hpp"
31#include "classfile/vmSymbols.hpp"
32#include "code/icBuffer.hpp"
33#include "code/vtableStubs.hpp"
34#include "compiler/compileBroker.hpp"
35#include "compiler/disassembler.hpp"
36#include "interpreter/interpreter.hpp"
37#include "jvm_windows.h"
38#include "logging/log.hpp"
39#include "memory/allocation.inline.hpp"
40#include "memory/filemap.hpp"
41#include "oops/oop.inline.hpp"
42#include "os_share_windows.hpp"
43#include "os_windows.inline.hpp"
44#include "prims/jniFastGetField.hpp"
45#include "prims/jvm.h"
46#include "prims/jvm_misc.hpp"
47#include "runtime/arguments.hpp"
48#include "runtime/atomic.inline.hpp"
49#include "runtime/extendedPC.hpp"
50#include "runtime/globals.hpp"
51#include "runtime/interfaceSupport.hpp"
52#include "runtime/java.hpp"
53#include "runtime/javaCalls.hpp"
54#include "runtime/mutexLocker.hpp"
55#include "runtime/objectMonitor.hpp"
56#include "runtime/orderAccess.inline.hpp"
57#include "runtime/osThread.hpp"
58#include "runtime/perfMemory.hpp"
59#include "runtime/sharedRuntime.hpp"
60#include "runtime/statSampler.hpp"
61#include "runtime/stubRoutines.hpp"
62#include "runtime/thread.inline.hpp"
63#include "runtime/threadCritical.hpp"
64#include "runtime/timer.hpp"
65#include "runtime/vm_version.hpp"
66#include "semaphore_windows.hpp"
67#include "services/attachListener.hpp"
68#include "services/memTracker.hpp"
69#include "services/runtimeService.hpp"
70#include "utilities/decoder.hpp"
71#include "utilities/defaultStream.hpp"
72#include "utilities/events.hpp"
73#include "utilities/growableArray.hpp"
74#include "utilities/macros.hpp"
75#include "utilities/vmError.hpp"
76
77#ifdef _DEBUG
78#include <crtdbg.h>
79#endif
80
81
82#include <windows.h>
83#include <sys/types.h>
84#include <sys/stat.h>
85#include <sys/timeb.h>
86#include <objidl.h>
87#include <shlobj.h>
88
89#include <malloc.h>
90#include <signal.h>
91#include <direct.h>
92#include <errno.h>
93#include <fcntl.h>
94#include <io.h>
95#include <process.h>              // For _beginthreadex(), _endthreadex()
96#include <imagehlp.h>             // For os::dll_address_to_function_name
97// for enumerating dll libraries
98#include <vdmdbg.h>
99
100// for timer info max values which include all bits
101#define ALL_64_BITS CONST64(-1)
102
103// For DLL loading/load error detection
104// Values of PE COFF
105#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
106#define IMAGE_FILE_SIGNATURE_LENGTH 4
107
108static HANDLE main_process;
109static HANDLE main_thread;
110static int    main_thread_id;
111
112static FILETIME process_creation_time;
113static FILETIME process_exit_time;
114static FILETIME process_user_time;
115static FILETIME process_kernel_time;
116
117#ifdef _M_IA64
118  #define __CPU__ ia64
119#else
120  #ifdef _M_AMD64
121    #define __CPU__ amd64
122  #else
123    #define __CPU__ i486
124  #endif
125#endif
126
127// save DLL module handle, used by GetModuleFileName
128
129HINSTANCE vm_lib_handle;
130
131BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
132  switch (reason) {
133  case DLL_PROCESS_ATTACH:
134    vm_lib_handle = hinst;
135    if (ForceTimeHighResolution) {
136      timeBeginPeriod(1L);
137    }
138    break;
139  case DLL_PROCESS_DETACH:
140    if (ForceTimeHighResolution) {
141      timeEndPeriod(1L);
142    }
143    break;
144  default:
145    break;
146  }
147  return true;
148}
149
150static inline double fileTimeAsDouble(FILETIME* time) {
151  const double high  = (double) ((unsigned int) ~0);
152  const double split = 10000000.0;
153  double result = (time->dwLowDateTime / split) +
154                   time->dwHighDateTime * (high/split);
155  return result;
156}
157
158// Implementation of os
159
160bool os::unsetenv(const char* name) {
161  assert(name != NULL, "Null pointer");
162  return (SetEnvironmentVariable(name, NULL) == TRUE);
163}
164
165// No setuid programs under Windows.
166bool os::have_special_privileges() {
167  return false;
168}
169
170
171// This method is  a periodic task to check for misbehaving JNI applications
172// under CheckJNI, we can add any periodic checks here.
173// For Windows at the moment does nothing
174void os::run_periodic_checks() {
175  return;
176}
177
178// previous UnhandledExceptionFilter, if there is one
179static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
180
181LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
182
183void os::init_system_properties_values() {
184  // sysclasspath, java_home, dll_dir
185  {
186    char *home_path;
187    char *dll_path;
188    char *pslash;
189    char *bin = "\\bin";
190    char home_dir[MAX_PATH + 1];
191    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
192
193    if (alt_home_dir != NULL)  {
194      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
195      home_dir[MAX_PATH] = '\0';
196    } else {
197      os::jvm_path(home_dir, sizeof(home_dir));
198      // Found the full path to jvm.dll.
199      // Now cut the path to <java_home>/jre if we can.
200      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
201      pslash = strrchr(home_dir, '\\');
202      if (pslash != NULL) {
203        *pslash = '\0';                   // get rid of \{client|server}
204        pslash = strrchr(home_dir, '\\');
205        if (pslash != NULL) {
206          *pslash = '\0';                 // get rid of \bin
207        }
208      }
209    }
210
211    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
212    if (home_path == NULL) {
213      return;
214    }
215    strcpy(home_path, home_dir);
216    Arguments::set_java_home(home_path);
217    FREE_C_HEAP_ARRAY(char, home_path);
218
219    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
220                                mtInternal);
221    if (dll_path == NULL) {
222      return;
223    }
224    strcpy(dll_path, home_dir);
225    strcat(dll_path, bin);
226    Arguments::set_dll_dir(dll_path);
227    FREE_C_HEAP_ARRAY(char, dll_path);
228
229    if (!set_boot_path('\\', ';')) {
230      return;
231    }
232  }
233
234// library_path
235#define EXT_DIR "\\lib\\ext"
236#define BIN_DIR "\\bin"
237#define PACKAGE_DIR "\\Sun\\Java"
238  {
239    // Win32 library search order (See the documentation for LoadLibrary):
240    //
241    // 1. The directory from which application is loaded.
242    // 2. The system wide Java Extensions directory (Java only)
243    // 3. System directory (GetSystemDirectory)
244    // 4. Windows directory (GetWindowsDirectory)
245    // 5. The PATH environment variable
246    // 6. The current directory
247
248    char *library_path;
249    char tmp[MAX_PATH];
250    char *path_str = ::getenv("PATH");
251
252    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
253                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
254
255    library_path[0] = '\0';
256
257    GetModuleFileName(NULL, tmp, sizeof(tmp));
258    *(strrchr(tmp, '\\')) = '\0';
259    strcat(library_path, tmp);
260
261    GetWindowsDirectory(tmp, sizeof(tmp));
262    strcat(library_path, ";");
263    strcat(library_path, tmp);
264    strcat(library_path, PACKAGE_DIR BIN_DIR);
265
266    GetSystemDirectory(tmp, sizeof(tmp));
267    strcat(library_path, ";");
268    strcat(library_path, tmp);
269
270    GetWindowsDirectory(tmp, sizeof(tmp));
271    strcat(library_path, ";");
272    strcat(library_path, tmp);
273
274    if (path_str) {
275      strcat(library_path, ";");
276      strcat(library_path, path_str);
277    }
278
279    strcat(library_path, ";.");
280
281    Arguments::set_library_path(library_path);
282    FREE_C_HEAP_ARRAY(char, library_path);
283  }
284
285  // Default extensions directory
286  {
287    char path[MAX_PATH];
288    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
289    GetWindowsDirectory(path, MAX_PATH);
290    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
291            path, PACKAGE_DIR, EXT_DIR);
292    Arguments::set_ext_dirs(buf);
293  }
294  #undef EXT_DIR
295  #undef BIN_DIR
296  #undef PACKAGE_DIR
297
298#ifndef _WIN64
299  // set our UnhandledExceptionFilter and save any previous one
300  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
301#endif
302
303  // Done
304  return;
305}
306
307void os::breakpoint() {
308  DebugBreak();
309}
310
311// Invoked from the BREAKPOINT Macro
312extern "C" void breakpoint() {
313  os::breakpoint();
314}
315
316// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
317// So far, this method is only used by Native Memory Tracking, which is
318// only supported on Windows XP or later.
319//
320int os::get_native_stack(address* stack, int frames, int toSkip) {
321#ifdef _NMT_NOINLINE_
322  toSkip++;
323#endif
324  int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
325  for (int index = captured; index < frames; index ++) {
326    stack[index] = NULL;
327  }
328  return captured;
329}
330
331
332// os::current_stack_base()
333//
334//   Returns the base of the stack, which is the stack's
335//   starting address.  This function must be called
336//   while running on the stack of the thread being queried.
337
338address os::current_stack_base() {
339  MEMORY_BASIC_INFORMATION minfo;
340  address stack_bottom;
341  size_t stack_size;
342
343  VirtualQuery(&minfo, &minfo, sizeof(minfo));
344  stack_bottom =  (address)minfo.AllocationBase;
345  stack_size = minfo.RegionSize;
346
347  // Add up the sizes of all the regions with the same
348  // AllocationBase.
349  while (1) {
350    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
351    if (stack_bottom == (address)minfo.AllocationBase) {
352      stack_size += minfo.RegionSize;
353    } else {
354      break;
355    }
356  }
357
358#ifdef _M_IA64
359  // IA64 has memory and register stacks
360  //
361  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
362  // at thread creation (1MB backing store growing upwards, 1MB memory stack
363  // growing downwards, 2MB summed up)
364  //
365  // ...
366  // ------- top of stack (high address) -----
367  // |
368  // |      1MB
369  // |      Backing Store (Register Stack)
370  // |
371  // |         / \
372  // |          |
373  // |          |
374  // |          |
375  // ------------------------ stack base -----
376  // |      1MB
377  // |      Memory Stack
378  // |
379  // |          |
380  // |          |
381  // |          |
382  // |         \ /
383  // |
384  // ----- bottom of stack (low address) -----
385  // ...
386
387  stack_size = stack_size / 2;
388#endif
389  return stack_bottom + stack_size;
390}
391
392size_t os::current_stack_size() {
393  size_t sz;
394  MEMORY_BASIC_INFORMATION minfo;
395  VirtualQuery(&minfo, &minfo, sizeof(minfo));
396  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
397  return sz;
398}
399
400struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
401  const struct tm* time_struct_ptr = localtime(clock);
402  if (time_struct_ptr != NULL) {
403    *res = *time_struct_ptr;
404    return res;
405  }
406  return NULL;
407}
408
409LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
410
411// Thread start routine for all newly created threads
412static unsigned __stdcall thread_native_entry(Thread* thread) {
413  // Try to randomize the cache line index of hot stack frames.
414  // This helps when threads of the same stack traces evict each other's
415  // cache lines. The threads can be either from the same JVM instance, or
416  // from different JVM instances. The benefit is especially true for
417  // processors with hyperthreading technology.
418  static int counter = 0;
419  int pid = os::current_process_id();
420  _alloca(((pid ^ counter++) & 7) * 128);
421
422  thread->initialize_thread_current();
423
424  OSThread* osthr = thread->osthread();
425  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
426
427  if (UseNUMA) {
428    int lgrp_id = os::numa_get_group_id();
429    if (lgrp_id != -1) {
430      thread->set_lgrp_id(lgrp_id);
431    }
432  }
433
434  // Diagnostic code to investigate JDK-6573254
435  int res = 30115;  // non-java thread
436  if (thread->is_Java_thread()) {
437    res = 20115;    // java thread
438  }
439
440  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
441
442  // Install a win32 structured exception handler around every thread created
443  // by VM, so VM can generate error dump when an exception occurred in non-
444  // Java thread (e.g. VM thread).
445  __try {
446    thread->run();
447  } __except(topLevelExceptionFilter(
448                                     (_EXCEPTION_POINTERS*)_exception_info())) {
449    // Nothing to do.
450  }
451
452  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
453
454  // One less thread is executing
455  // When the VMThread gets here, the main thread may have already exited
456  // which frees the CodeHeap containing the Atomic::add code
457  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
458    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
459  }
460
461  // If a thread has not deleted itself ("delete this") as part of its
462  // termination sequence, we have to ensure thread-local-storage is
463  // cleared before we actually terminate. No threads should ever be
464  // deleted asynchronously with respect to their termination.
465  if (Thread::current_or_null_safe() != NULL) {
466    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
467    thread->clear_thread_current();
468  }
469
470  // Thread must not return from exit_process_or_thread(), but if it does,
471  // let it proceed to exit normally
472  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
473}
474
475static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
476                                  int thread_id) {
477  // Allocate the OSThread object
478  OSThread* osthread = new OSThread(NULL, NULL);
479  if (osthread == NULL) return NULL;
480
481  // Initialize support for Java interrupts
482  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
483  if (interrupt_event == NULL) {
484    delete osthread;
485    return NULL;
486  }
487  osthread->set_interrupt_event(interrupt_event);
488
489  // Store info on the Win32 thread into the OSThread
490  osthread->set_thread_handle(thread_handle);
491  osthread->set_thread_id(thread_id);
492
493  if (UseNUMA) {
494    int lgrp_id = os::numa_get_group_id();
495    if (lgrp_id != -1) {
496      thread->set_lgrp_id(lgrp_id);
497    }
498  }
499
500  // Initial thread state is INITIALIZED, not SUSPENDED
501  osthread->set_state(INITIALIZED);
502
503  return osthread;
504}
505
506
507bool os::create_attached_thread(JavaThread* thread) {
508#ifdef ASSERT
509  thread->verify_not_published();
510#endif
511  HANDLE thread_h;
512  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
513                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
514    fatal("DuplicateHandle failed\n");
515  }
516  OSThread* osthread = create_os_thread(thread, thread_h,
517                                        (int)current_thread_id());
518  if (osthread == NULL) {
519    return false;
520  }
521
522  // Initial thread state is RUNNABLE
523  osthread->set_state(RUNNABLE);
524
525  thread->set_osthread(osthread);
526
527  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
528    os::current_thread_id());
529
530  return true;
531}
532
533bool os::create_main_thread(JavaThread* thread) {
534#ifdef ASSERT
535  thread->verify_not_published();
536#endif
537  if (_starting_thread == NULL) {
538    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
539    if (_starting_thread == NULL) {
540      return false;
541    }
542  }
543
544  // The primordial thread is runnable from the start)
545  _starting_thread->set_state(RUNNABLE);
546
547  thread->set_osthread(_starting_thread);
548  return true;
549}
550
551// Helper function to trace _beginthreadex attributes,
552//  similar to os::Posix::describe_pthread_attr()
553static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
554                                               size_t stacksize, unsigned initflag) {
555  stringStream ss(buf, buflen);
556  if (stacksize == 0) {
557    ss.print("stacksize: default, ");
558  } else {
559    ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
560  }
561  ss.print("flags: ");
562  #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
563  #define ALL(X) \
564    X(CREATE_SUSPENDED) \
565    X(STACK_SIZE_PARAM_IS_A_RESERVATION)
566  ALL(PRINT_FLAG)
567  #undef ALL
568  #undef PRINT_FLAG
569  return buf;
570}
571
572// Allocate and initialize a new OSThread
573bool os::create_thread(Thread* thread, ThreadType thr_type,
574                       size_t stack_size) {
575  unsigned thread_id;
576
577  // Allocate the OSThread object
578  OSThread* osthread = new OSThread(NULL, NULL);
579  if (osthread == NULL) {
580    return false;
581  }
582
583  // Initialize support for Java interrupts
584  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
585  if (interrupt_event == NULL) {
586    delete osthread;
587    return NULL;
588  }
589  osthread->set_interrupt_event(interrupt_event);
590  osthread->set_interrupted(false);
591
592  thread->set_osthread(osthread);
593
594  if (stack_size == 0) {
595    switch (thr_type) {
596    case os::java_thread:
597      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
598      if (JavaThread::stack_size_at_create() > 0) {
599        stack_size = JavaThread::stack_size_at_create();
600      }
601      break;
602    case os::compiler_thread:
603      if (CompilerThreadStackSize > 0) {
604        stack_size = (size_t)(CompilerThreadStackSize * K);
605        break;
606      } // else fall through:
607        // use VMThreadStackSize if CompilerThreadStackSize is not defined
608    case os::vm_thread:
609    case os::pgc_thread:
610    case os::cgc_thread:
611    case os::watcher_thread:
612      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
613      break;
614    }
615  }
616
617  // Create the Win32 thread
618  //
619  // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
620  // does not specify stack size. Instead, it specifies the size of
621  // initially committed space. The stack size is determined by
622  // PE header in the executable. If the committed "stack_size" is larger
623  // than default value in the PE header, the stack is rounded up to the
624  // nearest multiple of 1MB. For example if the launcher has default
625  // stack size of 320k, specifying any size less than 320k does not
626  // affect the actual stack size at all, it only affects the initial
627  // commitment. On the other hand, specifying 'stack_size' larger than
628  // default value may cause significant increase in memory usage, because
629  // not only the stack space will be rounded up to MB, but also the
630  // entire space is committed upfront.
631  //
632  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
633  // for CreateThread() that can treat 'stack_size' as stack size. However we
634  // are not supposed to call CreateThread() directly according to MSDN
635  // document because JVM uses C runtime library. The good news is that the
636  // flag appears to work with _beginthredex() as well.
637
638  const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
639  HANDLE thread_handle =
640    (HANDLE)_beginthreadex(NULL,
641                           (unsigned)stack_size,
642                           (unsigned (__stdcall *)(void*)) thread_native_entry,
643                           thread,
644                           initflag,
645                           &thread_id);
646
647  char buf[64];
648  if (thread_handle != NULL) {
649    log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
650      thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
651  } else {
652    log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
653      os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
654  }
655
656  if (thread_handle == NULL) {
657    // Need to clean up stuff we've allocated so far
658    CloseHandle(osthread->interrupt_event());
659    thread->set_osthread(NULL);
660    delete osthread;
661    return NULL;
662  }
663
664  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);
665
666  // Store info on the Win32 thread into the OSThread
667  osthread->set_thread_handle(thread_handle);
668  osthread->set_thread_id(thread_id);
669
670  // Initial thread state is INITIALIZED, not SUSPENDED
671  osthread->set_state(INITIALIZED);
672
673  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
674  return true;
675}
676
677
678// Free Win32 resources related to the OSThread
679void os::free_thread(OSThread* osthread) {
680  assert(osthread != NULL, "osthread not set");
681
682  // We are told to free resources of the argument thread,
683  // but we can only really operate on the current thread.
684  assert(Thread::current()->osthread() == osthread,
685         "os::free_thread but not current thread");
686
687  CloseHandle(osthread->thread_handle());
688  CloseHandle(osthread->interrupt_event());
689  delete osthread;
690}
691
692static jlong first_filetime;
693static jlong initial_performance_count;
694static jlong performance_frequency;
695
696
697jlong as_long(LARGE_INTEGER x) {
698  jlong result = 0; // initialization to avoid warning
699  set_high(&result, x.HighPart);
700  set_low(&result, x.LowPart);
701  return result;
702}
703
704
705jlong os::elapsed_counter() {
706  LARGE_INTEGER count;
707  QueryPerformanceCounter(&count);
708  return as_long(count) - initial_performance_count;
709}
710
711
712jlong os::elapsed_frequency() {
713  return performance_frequency;
714}
715
716
717julong os::available_memory() {
718  return win32::available_memory();
719}
720
721julong os::win32::available_memory() {
722  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
723  // value if total memory is larger than 4GB
724  MEMORYSTATUSEX ms;
725  ms.dwLength = sizeof(ms);
726  GlobalMemoryStatusEx(&ms);
727
728  return (julong)ms.ullAvailPhys;
729}
730
731julong os::physical_memory() {
732  return win32::physical_memory();
733}
734
735bool os::has_allocatable_memory_limit(julong* limit) {
736  MEMORYSTATUSEX ms;
737  ms.dwLength = sizeof(ms);
738  GlobalMemoryStatusEx(&ms);
739#ifdef _LP64
740  *limit = (julong)ms.ullAvailVirtual;
741  return true;
742#else
743  // Limit to 1400m because of the 2gb address space wall
744  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
745  return true;
746#endif
747}
748
749int os::active_processor_count() {
750  DWORD_PTR lpProcessAffinityMask = 0;
751  DWORD_PTR lpSystemAffinityMask = 0;
752  int proc_count = processor_count();
753  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
754      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
755    // Nof active processors is number of bits in process affinity mask
756    int bitcount = 0;
757    while (lpProcessAffinityMask != 0) {
758      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
759      bitcount++;
760    }
761    return bitcount;
762  } else {
763    return proc_count;
764  }
765}
766
767void os::set_native_thread_name(const char *name) {
768
769  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
770  //
771  // Note that unfortunately this only works if the process
772  // is already attached to a debugger; debugger must observe
773  // the exception below to show the correct name.
774
775  const DWORD MS_VC_EXCEPTION = 0x406D1388;
776  struct {
777    DWORD dwType;     // must be 0x1000
778    LPCSTR szName;    // pointer to name (in user addr space)
779    DWORD dwThreadID; // thread ID (-1=caller thread)
780    DWORD dwFlags;    // reserved for future use, must be zero
781  } info;
782
783  info.dwType = 0x1000;
784  info.szName = name;
785  info.dwThreadID = -1;
786  info.dwFlags = 0;
787
788  __try {
789    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
790  } __except(EXCEPTION_CONTINUE_EXECUTION) {}
791}
792
793bool os::distribute_processes(uint length, uint* distribution) {
794  // Not yet implemented.
795  return false;
796}
797
798bool os::bind_to_processor(uint processor_id) {
799  // Not yet implemented.
800  return false;
801}
802
803void os::win32::initialize_performance_counter() {
804  LARGE_INTEGER count;
805  QueryPerformanceFrequency(&count);
806  performance_frequency = as_long(count);
807  QueryPerformanceCounter(&count);
808  initial_performance_count = as_long(count);
809}
810
811
812double os::elapsedTime() {
813  return (double) elapsed_counter() / (double) elapsed_frequency();
814}
815
816
817// Windows format:
818//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
819// Java format:
820//   Java standards require the number of milliseconds since 1/1/1970
821
822// Constant offset - calculated using offset()
823static jlong  _offset   = 116444736000000000;
824// Fake time counter for reproducible results when debugging
825static jlong  fake_time = 0;
826
827#ifdef ASSERT
828// Just to be safe, recalculate the offset in debug mode
829static jlong _calculated_offset = 0;
830static int   _has_calculated_offset = 0;
831
832jlong offset() {
833  if (_has_calculated_offset) return _calculated_offset;
834  SYSTEMTIME java_origin;
835  java_origin.wYear          = 1970;
836  java_origin.wMonth         = 1;
837  java_origin.wDayOfWeek     = 0; // ignored
838  java_origin.wDay           = 1;
839  java_origin.wHour          = 0;
840  java_origin.wMinute        = 0;
841  java_origin.wSecond        = 0;
842  java_origin.wMilliseconds  = 0;
843  FILETIME jot;
844  if (!SystemTimeToFileTime(&java_origin, &jot)) {
845    fatal("Error = %d\nWindows error", GetLastError());
846  }
847  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
848  _has_calculated_offset = 1;
849  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
850  return _calculated_offset;
851}
852#else
853jlong offset() {
854  return _offset;
855}
856#endif
857
858jlong windows_to_java_time(FILETIME wt) {
859  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
860  return (a - offset()) / 10000;
861}
862
863// Returns time ticks in (10th of micro seconds)
864jlong windows_to_time_ticks(FILETIME wt) {
865  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
866  return (a - offset());
867}
868
869FILETIME java_to_windows_time(jlong l) {
870  jlong a = (l * 10000) + offset();
871  FILETIME result;
872  result.dwHighDateTime = high(a);
873  result.dwLowDateTime  = low(a);
874  return result;
875}
876
877bool os::supports_vtime() { return true; }
878bool os::enable_vtime() { return false; }
879bool os::vtime_enabled() { return false; }
880
881double os::elapsedVTime() {
882  FILETIME created;
883  FILETIME exited;
884  FILETIME kernel;
885  FILETIME user;
886  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
887    // the resolution of windows_to_java_time() should be sufficient (ms)
888    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
889  } else {
890    return elapsedTime();
891  }
892}
893
894jlong os::javaTimeMillis() {
895  if (UseFakeTimers) {
896    return fake_time++;
897  } else {
898    FILETIME wt;
899    GetSystemTimeAsFileTime(&wt);
900    return windows_to_java_time(wt);
901  }
902}
903
904void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
905  FILETIME wt;
906  GetSystemTimeAsFileTime(&wt);
907  jlong ticks = windows_to_time_ticks(wt); // 10th of micros
908  jlong secs = jlong(ticks / 10000000); // 10000 * 1000
909  seconds = secs;
910  nanos = jlong(ticks - (secs*10000000)) * 100;
911}
912
913jlong os::javaTimeNanos() {
914    LARGE_INTEGER current_count;
915    QueryPerformanceCounter(&current_count);
916    double current = as_long(current_count);
917    double freq = performance_frequency;
918    jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
919    return time;
920}
921
922void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
923  jlong freq = performance_frequency;
924  if (freq < NANOSECS_PER_SEC) {
925    // the performance counter is 64 bits and we will
926    // be multiplying it -- so no wrap in 64 bits
927    info_ptr->max_value = ALL_64_BITS;
928  } else if (freq > NANOSECS_PER_SEC) {
929    // use the max value the counter can reach to
930    // determine the max value which could be returned
931    julong max_counter = (julong)ALL_64_BITS;
932    info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
933  } else {
934    // the performance counter is 64 bits and we will
935    // be using it directly -- so no wrap in 64 bits
936    info_ptr->max_value = ALL_64_BITS;
937  }
938
939  // using a counter, so no skipping
940  info_ptr->may_skip_backward = false;
941  info_ptr->may_skip_forward = false;
942
943  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
944}
945
946char* os::local_time_string(char *buf, size_t buflen) {
947  SYSTEMTIME st;
948  GetLocalTime(&st);
949  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
950               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
951  return buf;
952}
953
954bool os::getTimesSecs(double* process_real_time,
955                      double* process_user_time,
956                      double* process_system_time) {
957  HANDLE h_process = GetCurrentProcess();
958  FILETIME create_time, exit_time, kernel_time, user_time;
959  BOOL result = GetProcessTimes(h_process,
960                                &create_time,
961                                &exit_time,
962                                &kernel_time,
963                                &user_time);
964  if (result != 0) {
965    FILETIME wt;
966    GetSystemTimeAsFileTime(&wt);
967    jlong rtc_millis = windows_to_java_time(wt);
968    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
969    *process_user_time =
970      (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
971    *process_system_time =
972      (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
973    return true;
974  } else {
975    return false;
976  }
977}
978
979void os::shutdown() {
980  // allow PerfMemory to attempt cleanup of any persistent resources
981  perfMemory_exit();
982
983  // flush buffered output, finish log files
984  ostream_abort();
985
986  // Check for abort hook
987  abort_hook_t abort_hook = Arguments::abort_hook();
988  if (abort_hook != NULL) {
989    abort_hook();
990  }
991}
992
993
994static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
995                                         PMINIDUMP_EXCEPTION_INFORMATION,
996                                         PMINIDUMP_USER_STREAM_INFORMATION,
997                                         PMINIDUMP_CALLBACK_INFORMATION);
998
999static HANDLE dumpFile = NULL;
1000
1001// Check if dump file can be created.
1002void os::check_dump_limit(char* buffer, size_t buffsz) {
1003  bool status = true;
1004  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1005    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1006    status = false;
1007  }
1008
1009#ifndef ASSERT
1010  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1011    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1012    status = false;
1013  }
1014#endif
1015
1016  if (status) {
1017    const char* cwd = get_current_directory(NULL, 0);
1018    int pid = current_process_id();
1019    if (cwd != NULL) {
1020      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1021    } else {
1022      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1023    }
1024
1025    if (dumpFile == NULL &&
1026       (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1027                 == INVALID_HANDLE_VALUE) {
1028      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1029      status = false;
1030    }
1031  }
1032  VMError::record_coredump_status(buffer, status);
1033}
1034
1035void os::abort(bool dump_core, void* siginfo, const void* context) {
1036  HINSTANCE dbghelp;
1037  EXCEPTION_POINTERS ep;
1038  MINIDUMP_EXCEPTION_INFORMATION mei;
1039  MINIDUMP_EXCEPTION_INFORMATION* pmei;
1040
1041  HANDLE hProcess = GetCurrentProcess();
1042  DWORD processId = GetCurrentProcessId();
1043  MINIDUMP_TYPE dumpType;
1044
1045  shutdown();
1046  if (!dump_core || dumpFile == NULL) {
1047    if (dumpFile != NULL) {
1048      CloseHandle(dumpFile);
1049    }
1050    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1051  }
1052
1053  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
1054
1055  if (dbghelp == NULL) {
1056    jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
1057    CloseHandle(dumpFile);
1058    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1059  }
1060
1061  _MiniDumpWriteDump =
1062      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
1063                                    PMINIDUMP_EXCEPTION_INFORMATION,
1064                                    PMINIDUMP_USER_STREAM_INFORMATION,
1065                                    PMINIDUMP_CALLBACK_INFORMATION),
1066                                    GetProcAddress(dbghelp,
1067                                    "MiniDumpWriteDump"));
1068
1069  if (_MiniDumpWriteDump == NULL) {
1070    jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
1071    CloseHandle(dumpFile);
1072    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1073  }
1074
1075  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1076    MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1077
1078  if (siginfo != NULL && context != NULL) {
1079    ep.ContextRecord = (PCONTEXT) context;
1080    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1081
1082    mei.ThreadId = GetCurrentThreadId();
1083    mei.ExceptionPointers = &ep;
1084    pmei = &mei;
1085  } else {
1086    pmei = NULL;
1087  }
1088
1089  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1090  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
1091  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
1092      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
1093    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1094  }
1095  CloseHandle(dumpFile);
1096  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1097}
1098
1099// Die immediately, no exit hook, no abort hook, no cleanup.
1100void os::die() {
1101  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1102}
1103
1104// Directory routines copied from src/win32/native/java/io/dirent_md.c
1105//  * dirent_md.c       1.15 00/02/02
1106//
1107// The declarations for DIR and struct dirent are in jvm_win32.h.
1108
1109// Caller must have already run dirname through JVM_NativePath, which removes
1110// duplicate slashes and converts all instances of '/' into '\\'.
1111
1112DIR * os::opendir(const char *dirname) {
1113  assert(dirname != NULL, "just checking");   // hotspot change
1114  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1115  DWORD fattr;                                // hotspot change
1116  char alt_dirname[4] = { 0, 0, 0, 0 };
1117
1118  if (dirp == 0) {
1119    errno = ENOMEM;
1120    return 0;
1121  }
1122
1123  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1124  // as a directory in FindFirstFile().  We detect this case here and
1125  // prepend the current drive name.
1126  //
1127  if (dirname[1] == '\0' && dirname[0] == '\\') {
1128    alt_dirname[0] = _getdrive() + 'A' - 1;
1129    alt_dirname[1] = ':';
1130    alt_dirname[2] = '\\';
1131    alt_dirname[3] = '\0';
1132    dirname = alt_dirname;
1133  }
1134
1135  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1136  if (dirp->path == 0) {
1137    free(dirp);
1138    errno = ENOMEM;
1139    return 0;
1140  }
1141  strcpy(dirp->path, dirname);
1142
1143  fattr = GetFileAttributes(dirp->path);
1144  if (fattr == 0xffffffff) {
1145    free(dirp->path);
1146    free(dirp);
1147    errno = ENOENT;
1148    return 0;
1149  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1150    free(dirp->path);
1151    free(dirp);
1152    errno = ENOTDIR;
1153    return 0;
1154  }
1155
1156  // Append "*.*", or possibly "\\*.*", to path
1157  if (dirp->path[1] == ':' &&
1158      (dirp->path[2] == '\0' ||
1159      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1160    // No '\\' needed for cases like "Z:" or "Z:\"
1161    strcat(dirp->path, "*.*");
1162  } else {
1163    strcat(dirp->path, "\\*.*");
1164  }
1165
1166  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1167  if (dirp->handle == INVALID_HANDLE_VALUE) {
1168    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1169      free(dirp->path);
1170      free(dirp);
1171      errno = EACCES;
1172      return 0;
1173    }
1174  }
1175  return dirp;
1176}
1177
1178// parameter dbuf unused on Windows
1179struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
1180  assert(dirp != NULL, "just checking");      // hotspot change
1181  if (dirp->handle == INVALID_HANDLE_VALUE) {
1182    return 0;
1183  }
1184
1185  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1186
1187  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1188    if (GetLastError() == ERROR_INVALID_HANDLE) {
1189      errno = EBADF;
1190      return 0;
1191    }
1192    FindClose(dirp->handle);
1193    dirp->handle = INVALID_HANDLE_VALUE;
1194  }
1195
1196  return &dirp->dirent;
1197}
1198
1199int os::closedir(DIR *dirp) {
1200  assert(dirp != NULL, "just checking");      // hotspot change
1201  if (dirp->handle != INVALID_HANDLE_VALUE) {
1202    if (!FindClose(dirp->handle)) {
1203      errno = EBADF;
1204      return -1;
1205    }
1206    dirp->handle = INVALID_HANDLE_VALUE;
1207  }
1208  free(dirp->path);
1209  free(dirp);
1210  return 0;
1211}
1212
1213// This must be hard coded because it's the system's temporary
1214// directory not the java application's temp directory, ala java.io.tmpdir.
1215const char* os::get_temp_directory() {
1216  static char path_buf[MAX_PATH];
1217  if (GetTempPath(MAX_PATH, path_buf) > 0) {
1218    return path_buf;
1219  } else {
1220    path_buf[0] = '\0';
1221    return path_buf;
1222  }
1223}
1224
1225static bool file_exists(const char* filename) {
1226  if (filename == NULL || strlen(filename) == 0) {
1227    return false;
1228  }
1229  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
1230}
1231
1232bool os::dll_build_name(char *buffer, size_t buflen,
1233                        const char* pname, const char* fname) {
1234  bool retval = false;
1235  const size_t pnamelen = pname ? strlen(pname) : 0;
1236  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
1237
1238  // Return error on buffer overflow.
1239  if (pnamelen + strlen(fname) + 10 > buflen) {
1240    return retval;
1241  }
1242
1243  if (pnamelen == 0) {
1244    jio_snprintf(buffer, buflen, "%s.dll", fname);
1245    retval = true;
1246  } else if (c == ':' || c == '\\') {
1247    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
1248    retval = true;
1249  } else if (strchr(pname, *os::path_separator()) != NULL) {
1250    int n;
1251    char** pelements = split_path(pname, &n);
1252    if (pelements == NULL) {
1253      return false;
1254    }
1255    for (int i = 0; i < n; i++) {
1256      char* path = pelements[i];
1257      // Really shouldn't be NULL, but check can't hurt
1258      size_t plen = (path == NULL) ? 0 : strlen(path);
1259      if (plen == 0) {
1260        continue; // skip the empty path values
1261      }
1262      const char lastchar = path[plen - 1];
1263      if (lastchar == ':' || lastchar == '\\') {
1264        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
1265      } else {
1266        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
1267      }
1268      if (file_exists(buffer)) {
1269        retval = true;
1270        break;
1271      }
1272    }
1273    // release the storage
1274    for (int i = 0; i < n; i++) {
1275      if (pelements[i] != NULL) {
1276        FREE_C_HEAP_ARRAY(char, pelements[i]);
1277      }
1278    }
1279    if (pelements != NULL) {
1280      FREE_C_HEAP_ARRAY(char*, pelements);
1281    }
1282  } else {
1283    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
1284    retval = true;
1285  }
1286  return retval;
1287}
1288
1289// Needs to be in os specific directory because windows requires another
1290// header file <direct.h>
1291const char* os::get_current_directory(char *buf, size_t buflen) {
1292  int n = static_cast<int>(buflen);
1293  if (buflen > INT_MAX)  n = INT_MAX;
1294  return _getcwd(buf, n);
1295}
1296
1297//-----------------------------------------------------------
1298// Helper functions for fatal error handler
1299#ifdef _WIN64
1300// Helper routine which returns true if address in
1301// within the NTDLL address space.
1302//
1303static bool _addr_in_ntdll(address addr) {
1304  HMODULE hmod;
1305  MODULEINFO minfo;
1306
1307  hmod = GetModuleHandle("NTDLL.DLL");
1308  if (hmod == NULL) return false;
1309  if (!GetModuleInformation(GetCurrentProcess(), hmod,
1310                                          &minfo, sizeof(MODULEINFO))) {
1311    return false;
1312  }
1313
1314  if ((addr >= minfo.lpBaseOfDll) &&
1315      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1316    return true;
1317  } else {
1318    return false;
1319  }
1320}
1321#endif
1322
1323struct _modinfo {
1324  address addr;
1325  char*   full_path;   // point to a char buffer
1326  int     buflen;      // size of the buffer
1327  address base_addr;
1328};
1329
1330static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1331                                  address top_address, void * param) {
1332  struct _modinfo *pmod = (struct _modinfo *)param;
1333  if (!pmod) return -1;
1334
1335  if (base_addr   <= pmod->addr &&
1336      top_address > pmod->addr) {
1337    // if a buffer is provided, copy path name to the buffer
1338    if (pmod->full_path) {
1339      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1340    }
1341    pmod->base_addr = base_addr;
1342    return 1;
1343  }
1344  return 0;
1345}
1346
1347bool os::dll_address_to_library_name(address addr, char* buf,
1348                                     int buflen, int* offset) {
1349  // buf is not optional, but offset is optional
1350  assert(buf != NULL, "sanity check");
1351
1352// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1353//       return the full path to the DLL file, sometimes it returns path
1354//       to the corresponding PDB file (debug info); sometimes it only
1355//       returns partial path, which makes life painful.
1356
1357  struct _modinfo mi;
1358  mi.addr      = addr;
1359  mi.full_path = buf;
1360  mi.buflen    = buflen;
1361  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1362    // buf already contains path name
1363    if (offset) *offset = addr - mi.base_addr;
1364    return true;
1365  }
1366
1367  buf[0] = '\0';
1368  if (offset) *offset = -1;
1369  return false;
1370}
1371
1372bool os::dll_address_to_function_name(address addr, char *buf,
1373                                      int buflen, int *offset,
1374                                      bool demangle) {
1375  // buf is not optional, but offset is optional
1376  assert(buf != NULL, "sanity check");
1377
1378  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1379    return true;
1380  }
1381  if (offset != NULL)  *offset  = -1;
1382  buf[0] = '\0';
1383  return false;
1384}
1385
1386// save the start and end address of jvm.dll into param[0] and param[1]
1387static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1388                           address top_address, void * param) {
1389  if (!param) return -1;
1390
1391  if (base_addr   <= (address)_locate_jvm_dll &&
1392      top_address > (address)_locate_jvm_dll) {
1393    ((address*)param)[0] = base_addr;
1394    ((address*)param)[1] = top_address;
1395    return 1;
1396  }
1397  return 0;
1398}
1399
1400address vm_lib_location[2];    // start and end address of jvm.dll
1401
1402// check if addr is inside jvm.dll
1403bool os::address_is_in_vm(address addr) {
1404  if (!vm_lib_location[0] || !vm_lib_location[1]) {
1405    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1406      assert(false, "Can't find jvm module.");
1407      return false;
1408    }
1409  }
1410
1411  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1412}
1413
1414// print module info; param is outputStream*
1415static int _print_module(const char* fname, address base_address,
1416                         address top_address, void* param) {
1417  if (!param) return -1;
1418
1419  outputStream* st = (outputStream*)param;
1420
1421  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1422  return 0;
1423}
1424
1425// Loads .dll/.so and
1426// in case of error it checks if .dll/.so was built for the
1427// same architecture as Hotspot is running on
1428void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1429  void * result = LoadLibrary(name);
1430  if (result != NULL) {
1431    return result;
1432  }
1433
1434  DWORD errcode = GetLastError();
1435  if (errcode == ERROR_MOD_NOT_FOUND) {
1436    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1437    ebuf[ebuflen - 1] = '\0';
1438    return NULL;
1439  }
1440
1441  // Parsing dll below
1442  // If we can read dll-info and find that dll was built
1443  // for an architecture other than Hotspot is running in
1444  // - then print to buffer "DLL was built for a different architecture"
1445  // else call os::lasterror to obtain system error message
1446
1447  // Read system error message into ebuf
1448  // It may or may not be overwritten below (in the for loop and just above)
1449  lasterror(ebuf, (size_t) ebuflen);
1450  ebuf[ebuflen - 1] = '\0';
1451  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1452  if (fd < 0) {
1453    return NULL;
1454  }
1455
1456  uint32_t signature_offset;
1457  uint16_t lib_arch = 0;
1458  bool failed_to_get_lib_arch =
1459    ( // Go to position 3c in the dll
1460     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1461     ||
1462     // Read location of signature
1463     (sizeof(signature_offset) !=
1464     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1465     ||
1466     // Go to COFF File Header in dll
1467     // that is located after "signature" (4 bytes long)
1468     (os::seek_to_file_offset(fd,
1469     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1470     ||
1471     // Read field that contains code of architecture
1472     // that dll was built for
1473     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1474    );
1475
1476  ::close(fd);
1477  if (failed_to_get_lib_arch) {
1478    // file i/o error - report os::lasterror(...) msg
1479    return NULL;
1480  }
1481
1482  typedef struct {
1483    uint16_t arch_code;
1484    char* arch_name;
1485  } arch_t;
1486
1487  static const arch_t arch_array[] = {
1488    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1489    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1490    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
1491  };
1492#if   (defined _M_IA64)
1493  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
1494#elif (defined _M_AMD64)
1495  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1496#elif (defined _M_IX86)
1497  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1498#else
1499  #error Method os::dll_load requires that one of following \
1500         is defined :_M_IA64,_M_AMD64 or _M_IX86
1501#endif
1502
1503
1504  // Obtain a string for printf operation
1505  // lib_arch_str shall contain string what platform this .dll was built for
1506  // running_arch_str shall string contain what platform Hotspot was built for
1507  char *running_arch_str = NULL, *lib_arch_str = NULL;
1508  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1509    if (lib_arch == arch_array[i].arch_code) {
1510      lib_arch_str = arch_array[i].arch_name;
1511    }
1512    if (running_arch == arch_array[i].arch_code) {
1513      running_arch_str = arch_array[i].arch_name;
1514    }
1515  }
1516
1517  assert(running_arch_str,
1518         "Didn't find running architecture code in arch_array");
1519
1520  // If the architecture is right
1521  // but some other error took place - report os::lasterror(...) msg
1522  if (lib_arch == running_arch) {
1523    return NULL;
1524  }
1525
1526  if (lib_arch_str != NULL) {
1527    ::_snprintf(ebuf, ebuflen - 1,
1528                "Can't load %s-bit .dll on a %s-bit platform",
1529                lib_arch_str, running_arch_str);
1530  } else {
1531    // don't know what architecture this dll was build for
1532    ::_snprintf(ebuf, ebuflen - 1,
1533                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1534                lib_arch, running_arch_str);
1535  }
1536
1537  return NULL;
1538}
1539
1540void os::print_dll_info(outputStream *st) {
1541  st->print_cr("Dynamic libraries:");
1542  get_loaded_modules_info(_print_module, (void *)st);
1543}
1544
1545int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1546  HANDLE   hProcess;
1547
1548# define MAX_NUM_MODULES 128
1549  HMODULE     modules[MAX_NUM_MODULES];
1550  static char filename[MAX_PATH];
1551  int         result = 0;
1552
1553  int pid = os::current_process_id();
1554  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1555                         FALSE, pid);
1556  if (hProcess == NULL) return 0;
1557
1558  DWORD size_needed;
1559  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1560    CloseHandle(hProcess);
1561    return 0;
1562  }
1563
1564  // number of modules that are currently loaded
1565  int num_modules = size_needed / sizeof(HMODULE);
1566
1567  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1568    // Get Full pathname:
1569    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1570      filename[0] = '\0';
1571    }
1572
1573    MODULEINFO modinfo;
1574    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1575      modinfo.lpBaseOfDll = NULL;
1576      modinfo.SizeOfImage = 0;
1577    }
1578
1579    // Invoke callback function
1580    result = callback(filename, (address)modinfo.lpBaseOfDll,
1581                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1582    if (result) break;
1583  }
1584
1585  CloseHandle(hProcess);
1586  return result;
1587}
1588
1589bool os::get_host_name(char* buf, size_t buflen) {
1590  DWORD size = (DWORD)buflen;
1591  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1592}
1593
1594void os::get_summary_os_info(char* buf, size_t buflen) {
1595  stringStream sst(buf, buflen);
1596  os::win32::print_windows_version(&sst);
1597  // chop off newline character
1598  char* nl = strchr(buf, '\n');
1599  if (nl != NULL) *nl = '\0';
1600}
1601
1602int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1603  int ret = vsnprintf(buf, len, fmt, args);
1604  // Get the correct buffer size if buf is too small
1605  if (ret < 0) {
1606    return _vscprintf(fmt, args);
1607  }
1608  return ret;
1609}
1610
1611static inline time_t get_mtime(const char* filename) {
1612  struct stat st;
1613  int ret = os::stat(filename, &st);
1614  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
1615  return st.st_mtime;
1616}
1617
1618int os::compare_file_modified_times(const char* file1, const char* file2) {
1619  time_t t1 = get_mtime(file1);
1620  time_t t2 = get_mtime(file2);
1621  return t1 - t2;
1622}
1623
1624void os::print_os_info_brief(outputStream* st) {
1625  os::print_os_info(st);
1626}
1627
1628void os::print_os_info(outputStream* st) {
1629#ifdef ASSERT
1630  char buffer[1024];
1631  st->print("HostName: ");
1632  if (get_host_name(buffer, sizeof(buffer))) {
1633    st->print("%s ", buffer);
1634  } else {
1635    st->print("N/A ");
1636  }
1637#endif
1638  st->print("OS:");
1639  os::win32::print_windows_version(st);
1640}
1641
1642void os::win32::print_windows_version(outputStream* st) {
1643  OSVERSIONINFOEX osvi;
1644  VS_FIXEDFILEINFO *file_info;
1645  TCHAR kernel32_path[MAX_PATH];
1646  UINT len, ret;
1647
1648  // Use the GetVersionEx information to see if we're on a server or
1649  // workstation edition of Windows. Starting with Windows 8.1 we can't
1650  // trust the OS version information returned by this API.
1651  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1652  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1653  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1654    st->print_cr("Call to GetVersionEx failed");
1655    return;
1656  }
1657  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1658
1659  // Get the full path to \Windows\System32\kernel32.dll and use that for
1660  // determining what version of Windows we're running on.
1661  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1662  ret = GetSystemDirectory(kernel32_path, len);
1663  if (ret == 0 || ret > len) {
1664    st->print_cr("Call to GetSystemDirectory failed");
1665    return;
1666  }
1667  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1668
1669  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1670  if (version_size == 0) {
1671    st->print_cr("Call to GetFileVersionInfoSize failed");
1672    return;
1673  }
1674
1675  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1676  if (version_info == NULL) {
1677    st->print_cr("Failed to allocate version_info");
1678    return;
1679  }
1680
1681  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1682    os::free(version_info);
1683    st->print_cr("Call to GetFileVersionInfo failed");
1684    return;
1685  }
1686
1687  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1688    os::free(version_info);
1689    st->print_cr("Call to VerQueryValue failed");
1690    return;
1691  }
1692
1693  int major_version = HIWORD(file_info->dwProductVersionMS);
1694  int minor_version = LOWORD(file_info->dwProductVersionMS);
1695  int build_number = HIWORD(file_info->dwProductVersionLS);
1696  int build_minor = LOWORD(file_info->dwProductVersionLS);
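  // Encode major.minor as major * 1000 + minor (e.g. 6.3 -> 6003, 10.0 -> 10000)
  // so the switch below can match on a single value.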
1697  int os_vers = major_version * 1000 + minor_version;
1698  os::free(version_info);
1699
1700  st->print(" Windows ");
1701  switch (os_vers) {
1702
1703  case 6000:
1704    if (is_workstation) {
1705      st->print("Vista");
1706    } else {
1707      st->print("Server 2008");
1708    }
1709    break;
1710
1711  case 6001:
1712    if (is_workstation) {
1713      st->print("7");
1714    } else {
1715      st->print("Server 2008 R2");
1716    }
1717    break;
1718
1719  case 6002:
1720    if (is_workstation) {
1721      st->print("8");
1722    } else {
1723      st->print("Server 2012");
1724    }
1725    break;
1726
1727  case 6003:
1728    if (is_workstation) {
1729      st->print("8.1");
1730    } else {
1731      st->print("Server 2012 R2");
1732    }
1733    break;
1734
1735  case 10000:
1736    if (is_workstation) {
1737      st->print("10");
1738    } else {
1739      st->print("Server 2016");
1740    }
1741    break;
1742
1743  default:
1744    // Unrecognized Windows version; print its major and minor version numbers
1745    st->print("%d.%d", major_version, minor_version);
1746    break;
1747  }
1748
1749  // Retrieve SYSTEM_INFO via GetNativeSystemInfo() so that we can
1750  // determine whether we are running on a 64-bit processor
1751  SYSTEM_INFO si;
1752  ZeroMemory(&si, sizeof(SYSTEM_INFO));
1753  GetNativeSystemInfo(&si);
1754  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1755    st->print(" , 64 bit");
1756  }
1757
1758  st->print(" Build %d", build_number);
1759  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1760  st->cr();
1761}
1762
1763void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1764  // Nothing to do for now.
1765}
1766
1767void os::get_summary_cpu_info(char* buf, size_t buflen) {
1768  HKEY key;
1769  DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1770               "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1771  if (status == ERROR_SUCCESS) {
1772    DWORD size = (DWORD)buflen;
1773    status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1774    if (status != ERROR_SUCCESS) {
1775      strncpy(buf, "## __CPU__", buflen);
1776    }
1777    RegCloseKey(key);
1778  } else {
1779    // Fall back to a generic CPU description
1780    strncpy(buf, "## __CPU__", buflen);
1781  }
1782}
1783
1784void os::print_memory_info(outputStream* st) {
1785  st->print("Memory:");
1786  st->print(" %dk page", os::vm_page_size()>>10);
1787
1788  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
1789  // incorrect value if total memory is larger than 4GB
1790  MEMORYSTATUSEX ms;
1791  ms.dwLength = sizeof(ms);
1792  GlobalMemoryStatusEx(&ms);
1793
1794  st->print(", physical " UINT64_FORMAT "k", (uint64_t)(os::physical_memory() >> 10));
1795  st->print("(" UINT64_FORMAT "k free)", (uint64_t)(os::available_memory() >> 10));
1796
1797  st->print(", swap " UINT64_FORMAT "k", (uint64_t)(ms.ullTotalPageFile >> 10));
1798  st->print("(" UINT64_FORMAT "k free)", (uint64_t)(ms.ullAvailPageFile >> 10));
1799  st->cr();
1800}
1801
1802void os::print_siginfo(outputStream *st, const void* siginfo) {
1803  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1804  st->print("siginfo:");
1805
1806  char tmp[64];
1807  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1808    strcpy(tmp, "EXCEPTION_??");
1809  }
1810  st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1811
1812  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1813       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1814       er->NumberParameters >= 2) {
1815    switch (er->ExceptionInformation[0]) {
1816    case 0: st->print(", reading address"); break;
1817    case 1: st->print(", writing address"); break;
1818    case 8: st->print(", data execution prevention violation at address"); break;
1819    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1820                       er->ExceptionInformation[0]);
1821    }
1822    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1823  } else {
1824    int num = er->NumberParameters;
1825    if (num > 0) {
1826      st->print(", ExceptionInformation=");
1827      for (int i = 0; i < num; i++) {
1828        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1829      }
1830    }
1831  }
1832  st->cr();
1833}
1834
1835void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1836  // do nothing
1837}
1838
1839static char saved_jvm_path[MAX_PATH] = {0};
1840
1841// Find the full path to the current module, jvm.dll
1842void os::jvm_path(char *buf, jint buflen) {
1843  // Error checking.
1844  if (buflen < MAX_PATH) {
1845    assert(false, "must use a large-enough buffer");
1846    buf[0] = '\0';
1847    return;
1848  }
1849  // Lazy resolve the path to current module.
1850  if (saved_jvm_path[0] != 0) {
1851    strcpy(buf, saved_jvm_path);
1852    return;
1853  }
1854
1855  buf[0] = '\0';
1856  if (Arguments::sun_java_launcher_is_altjvm()) {
1857    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1858    // for a JAVA_HOME environment variable and fix up the path so it
1859    // looks like jvm.dll is installed there (append a fake suffix
1860    // hotspot/jvm.dll).
1861    char* java_home_var = ::getenv("JAVA_HOME");
1862    if (java_home_var != NULL && java_home_var[0] != 0 &&
1863        strlen(java_home_var) < (size_t)buflen) {
1864      strncpy(buf, java_home_var, buflen);
1865
1866      // determine if this is a legacy image or modules image
1867      // modules image doesn't have "jre" subdirectory
1868      size_t len = strlen(buf);
1869      char* jrebin_p = buf + len;
1870      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1871      if (0 != _access(buf, 0)) {
1872        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1873      }
1874      len = strlen(buf);
1875      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1876    }
1877  }
1878
1879  if (buf[0] == '\0') {
1880    GetModuleFileName(vm_lib_handle, buf, buflen);
1881  }
1882  strncpy(saved_jvm_path, buf, MAX_PATH);
1883  saved_jvm_path[MAX_PATH - 1] = '\0';
1884}
1885
1886
1887void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1888#ifndef _WIN64
1889  st->print("_");
1890#endif
1891}
1892
1893
1894void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1895#ifndef _WIN64
1896  st->print("@%d", args_size  * sizeof(int));
1897#endif
1898}
1899
1900// This method is a copy of JDK's sysGetLastErrorString
1901// from src/windows/hpi/src/system_md.c
1902
1903size_t os::lasterror(char* buf, size_t len) {
1904  DWORD errval;
1905
1906  if ((errval = GetLastError()) != 0) {
1907    // DOS error
1908    size_t n = (size_t)FormatMessage(
1909                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1910                                     NULL,
1911                                     errval,
1912                                     0,
1913                                     buf,
1914                                     (DWORD)len,
1915                                     NULL);
1916    if (n > 3) {
1917      // Drop final '.', CR, LF
1918      if (buf[n - 1] == '\n') n--;
1919      if (buf[n - 1] == '\r') n--;
1920      if (buf[n - 1] == '.') n--;
1921      buf[n] = '\0';
1922    }
1923    return n;
1924  }
1925
1926  if (errno != 0) {
1927    // C runtime error that has no corresponding DOS error code
1928    const char* s = os::strerror(errno);
1929    size_t n = strlen(s);
1930    if (n >= len) n = len - 1;
1931    strncpy(buf, s, n);
1932    buf[n] = '\0';
1933    return n;
1934  }
1935
1936  return 0;
1937}
1938
1939int os::get_last_error() {
1940  DWORD error = GetLastError();
1941  if (error == 0) {
1942    error = errno;
1943  }
1944  return (int)error;
1945}
1946
1947WindowsSemaphore::WindowsSemaphore(uint value) {
1948  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);
1949
1950  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
1951}
1952
1953WindowsSemaphore::~WindowsSemaphore() {
1954  ::CloseHandle(_semaphore);
1955}
1956
1957void WindowsSemaphore::signal(uint count) {
1958  if (count > 0) {
1959    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);
1960
1961    assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
1962  }
1963}
1964
1965void WindowsSemaphore::wait() {
1966  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
1967  assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
1968  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
1969}
1970
1971// sun.misc.Signal
1972// NOTE that this is a workaround for an apparent kernel bug where if
1973// a signal handler for SIGBREAK is installed then that signal handler
1974// takes priority over the console control handler for CTRL_CLOSE_EVENT.
1975// See bug 4416763.
1976static void (*sigbreakHandler)(int) = NULL;
1977
1978static void UserHandler(int sig, void *siginfo, void *context) {
1979  os::signal_notify(sig);
1980  // We need to reinstate the signal handler each time...
1981  os::signal(sig, (void*)UserHandler);
1982}
1983
1984void* os::user_handler() {
1985  return (void*) UserHandler;
1986}
1987
1988void* os::signal(int signal_number, void* handler) {
1989  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1990    void (*oldHandler)(int) = sigbreakHandler;
1991    sigbreakHandler = (void (*)(int)) handler;
1992    return (void*) oldHandler;
1993  } else {
1994    return (void*)::signal(signal_number, (void (*)(int))handler);
1995  }
1996}
1997
1998void os::signal_raise(int signal_number) {
1999  raise(signal_number);
2000}
2001
2002// The Win32 C runtime library maps all console control events other than ^C
2003// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2004// logoff, and shutdown events.  We therefore install our own console handler
2005// that raises SIGTERM for the latter cases.
2006//
2007static BOOL WINAPI consoleHandler(DWORD event) {
2008  switch (event) {
2009  case CTRL_C_EVENT:
2010    if (is_error_reported()) {
2011      // Ctrl-C is pressed during error reporting, likely because the error
2012      // handler fails to abort. Let VM die immediately.
2013      os::die();
2014    }
2015
2016    os::signal_raise(SIGINT);
2017    return TRUE;
2018    break;
2019  case CTRL_BREAK_EVENT:
2020    if (sigbreakHandler != NULL) {
2021      (*sigbreakHandler)(SIGBREAK);
2022    }
2023    return TRUE;
2024    break;
2025  case CTRL_LOGOFF_EVENT: {
2026    // Don't terminate JVM if it is running in a non-interactive session,
2027    // such as a service process.
2028    USEROBJECTFLAGS flags;
2029    HANDLE handle = GetProcessWindowStation();
2030    if (handle != NULL &&
2031        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2032        sizeof(USEROBJECTFLAGS), NULL)) {
2033      // If it is a non-interactive session, let the next handler deal
2034      // with it.
2035      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2036        return FALSE;
2037      }
2038    }
2039  }
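  // Fall through: an interactive logoff is treated like close/shutdown and
  // raises SIGTERM below.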
2040  case CTRL_CLOSE_EVENT:
2041  case CTRL_SHUTDOWN_EVENT:
2042    os::signal_raise(SIGTERM);
2043    return TRUE;
2044    break;
2045  default:
2046    break;
2047  }
2048  return FALSE;
2049}
2050
2051// The following code was moved here from os.cpp because it is
2052// platform-specific by its very nature.
2053
2054// Return maximum OS signal used + 1 for internal use only
2055// Used as exit signal for signal_thread
2056int os::sigexitnum_pd() {
2057  return NSIG;
2058}
2059
2060// a counter for each possible signal value, including signal_thread exit signal
2061static volatile jint pending_signals[NSIG+1] = { 0 };
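// Semaphore released once for each posted signal; check_pending_signals() blocks on it.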
2062static HANDLE sig_sem = NULL;
2063
2064void os::signal_init_pd() {
2065  // Initialize signal structures
2066  memset((void*)pending_signals, 0, sizeof(pending_signals));
2067
2068  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);
2069
2070  // Programs embedding the VM do not want it to attempt to receive
2071  // events like CTRL_LOGOFF_EVENT, which are used to implement the
2072  // shutdown hooks mechanism introduced in 1.3.  For example, when
2073  // the VM is run as part of a Windows NT service (i.e., a servlet
2074  // engine in a web server), the correct behavior is for any console
2075  // control handler to return FALSE, not TRUE, because the OS's
2076  // "final" handler for such events allows the process to continue if
2077  // it is a service (while terminating it if it is not a service).
2078  // To make this behavior uniform and the mechanism simpler, we
2079  // completely disable the VM's usage of these console events if -Xrs
2080  // (=ReduceSignalUsage) is specified.  This means, for example, that
2081  // the CTRL-BREAK thread dump mechanism is also disabled in this
2082  // case.  See bugs 4323062, 4345157, and related bugs.
2083
2084  if (!ReduceSignalUsage) {
2085    // Add a CTRL-C handler
2086    SetConsoleCtrlHandler(consoleHandler, TRUE);
2087  }
2088}
2089
2090void os::signal_notify(int signal_number) {
2091  BOOL ret;
2092  if (sig_sem != NULL) {
2093    Atomic::inc(&pending_signals[signal_number]);
2094    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2095    assert(ret != 0, "ReleaseSemaphore() failed");
2096  }
2097}
2098
2099static int check_pending_signals(bool wait_for_signal) {
2100  DWORD ret;
2101  while (true) {
2102    for (int i = 0; i < NSIG + 1; i++) {
2103      jint n = pending_signals[i];
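      // Atomically claim one pending signal: if the cmpxchg succeeds, this
      // thread decremented the counter and may consume the signal.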
2104      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2105        return i;
2106      }
2107    }
2108    if (!wait_for_signal) {
2109      return -1;
2110    }
2111
2112    JavaThread *thread = JavaThread::current();
2113
2114    ThreadBlockInVM tbivm(thread);
2115
2116    bool threadIsSuspended;
2117    do {
2118      thread->set_suspend_equivalent();
2119      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2120      ret = ::WaitForSingleObject(sig_sem, INFINITE);
2121      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");
2122
2123      // were we externally suspended while we were waiting?
2124      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2125      if (threadIsSuspended) {
2126        // The semaphore has been incremented, but while we were waiting
2127        // another thread suspended us. We don't want to continue running
2128        // while suspended because that would surprise the thread that
2129        // suspended us.
2130        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
2131        assert(ret != 0, "ReleaseSemaphore() failed");
2132
2133        thread->java_suspend_self();
2134      }
2135    } while (threadIsSuspended);
2136  }
2137}
2138
2139int os::signal_lookup() {
2140  return check_pending_signals(false);
2141}
2142
2143int os::signal_wait() {
2144  return check_pending_signals(true);
2145}
2146
2147// Implicit OS exception handling
2148
2149LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2150                      address handler) {
2151  JavaThread* thread = (JavaThread*) Thread::current_or_null();
2152  // Save pc in thread
2153#ifdef _M_IA64
2154  // Do not blow up if no thread info available.
2155  if (thread) {
2156    // Saving PRECISE pc (with slot information) in thread.
2157    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
2158    // Convert precise PC into "Unix" format
2159    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
2160    thread->set_saved_exception_pc((address)precise_pc);
2161  }
2162  // Set pc to handler
2163  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
2164  // Clear out psr.ri (= Restart Instruction) in order to continue
2165  // at the beginning of the target bundle.
2166  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
2167  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
2168#else
2169  #ifdef _M_AMD64
2170  // Do not blow up if no thread info available.
2171  if (thread) {
2172    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2173  }
2174  // Set pc to handler
2175  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2176  #else
2177  // Do not blow up if no thread info available.
2178  if (thread) {
2179    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2180  }
2181  // Set pc to handler
2182  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2183  #endif
2184#endif
2185
2186  // Continue the execution
2187  return EXCEPTION_CONTINUE_EXECUTION;
2188}
2189
2190
2191// Used for PostMortemDump
2192extern "C" void safepoints();
2193extern "C" void find(int x);
2194extern "C" void events();
2195
2196// According to Windows API documentation, an illegal instruction sequence should generate
2197// the 0xC000001C exception code. However, real world experience shows that occasionally
2198// the execution of an illegal instruction can generate the exception code 0xC000001E. This
2199// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2200
2201#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2202
2203// From "Execution Protection in the Windows Operating System" draft 0.35
2204// Once a system header becomes available, the "real" define should be
2205// included or copied here.
2206#define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2207
2208// Handle NAT Bit consumption on IA64.
2209#ifdef _M_IA64
2210  #define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
2211#endif
2212
2213// Windows Vista/2008 heap corruption check
2214#define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2215
2216// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2217// C++ compiler contain this error code. Because this is a compiler-generated
2218// error, the code is not listed in the Win32 API header files.
2219// The code is actually a cryptic mnemonic device, with the initial "E"
2220// standing for "exception" and the final 3 bytes (0x6D7363) representing the
2221// ASCII values of "msc".
2222
2223#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2224
2225#define def_excpt(val) { #val, (val) }
2226
2227static const struct { char* name; uint number; } exceptlabels[] = {
2228    def_excpt(EXCEPTION_ACCESS_VIOLATION),
2229    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2230    def_excpt(EXCEPTION_BREAKPOINT),
2231    def_excpt(EXCEPTION_SINGLE_STEP),
2232    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2233    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2234    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2235    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2236    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2237    def_excpt(EXCEPTION_FLT_OVERFLOW),
2238    def_excpt(EXCEPTION_FLT_STACK_CHECK),
2239    def_excpt(EXCEPTION_FLT_UNDERFLOW),
2240    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2241    def_excpt(EXCEPTION_INT_OVERFLOW),
2242    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2243    def_excpt(EXCEPTION_IN_PAGE_ERROR),
2244    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2245    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2246    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2247    def_excpt(EXCEPTION_STACK_OVERFLOW),
2248    def_excpt(EXCEPTION_INVALID_DISPOSITION),
2249    def_excpt(EXCEPTION_GUARD_PAGE),
2250    def_excpt(EXCEPTION_INVALID_HANDLE),
2251    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2252    def_excpt(EXCEPTION_HEAP_CORRUPTION)
2253#ifdef _M_IA64
2254    , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION)
2255#endif
2256};
2257
2258#undef def_excpt
2259
2260const char* os::exception_name(int exception_code, char *buf, size_t size) {
2261  uint code = static_cast<uint>(exception_code);
2262  for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2263    if (exceptlabels[i].number == code) {
2264      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2265      return buf;
2266    }
2267  }
2268
2269  return NULL;
2270}
2271
2272//-----------------------------------------------------------------------------
2273LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2274  // handle exception caused by idiv; should only happen for -MinInt/-1
2275  // (division by zero is handled explicitly)
2276#ifdef _M_IA64
2277  assert(0, "Fix Handle_IDiv_Exception");
2278#else
2279  #ifdef  _M_AMD64
2280  PCONTEXT ctx = exceptionInfo->ContextRecord;
2281  address pc = (address)ctx->Rip;
2282  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2283  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2284  if (pc[0] == 0xF7) {
2285    // set correct result values and continue after idiv instruction
2286    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2287  } else {
2288    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2289  }
2290  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2291  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2292  // idiv opcode (0xF7).
2293  ctx->Rdx = (DWORD)0;             // remainder
2294  // Continue the execution
2295  #else
2296  PCONTEXT ctx = exceptionInfo->ContextRecord;
2297  address pc = (address)ctx->Eip;
2298  assert(pc[0] == 0xF7, "not an idiv opcode");
2299  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2300  assert(ctx->Eax == min_jint, "unexpected idiv exception");
2301  // set correct result values and continue after idiv instruction
2302  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2303  ctx->Eax = (DWORD)min_jint;      // result
2304  ctx->Edx = (DWORD)0;             // remainder
2305  // Continue the execution
2306  #endif
2307#endif
2308  return EXCEPTION_CONTINUE_EXECUTION;
2309}
2310
2311//-----------------------------------------------------------------------------
2312LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2313  PCONTEXT ctx = exceptionInfo->ContextRecord;
2314#ifndef  _WIN64
2315  // handle exception caused by native method modifying control word
2316  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2317
2318  switch (exception_code) {
2319  case EXCEPTION_FLT_DENORMAL_OPERAND:
2320  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2321  case EXCEPTION_FLT_INEXACT_RESULT:
2322  case EXCEPTION_FLT_INVALID_OPERATION:
2323  case EXCEPTION_FLT_OVERFLOW:
2324  case EXCEPTION_FLT_STACK_CHECK:
2325  case EXCEPTION_FLT_UNDERFLOW:
2326    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2327    if (fp_control_word != ctx->FloatSave.ControlWord) {
2328      // Restore FPCW and mask out FLT exceptions
2329      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2330      // Mask out pending FLT exceptions
2331      ctx->FloatSave.StatusWord &=  0xffffff00;
2332      return EXCEPTION_CONTINUE_EXECUTION;
2333    }
2334  }
2335
2336  if (prev_uef_handler != NULL) {
2337    // We didn't handle this exception so pass it to the previous
2338    // UnhandledExceptionFilter.
2339    return (prev_uef_handler)(exceptionInfo);
2340  }
2341#else // !_WIN64
2342  // On Windows, the mxcsr control bits are non-volatile across calls
2343  // See also CR 6192333
2344  //
2345  jint MxCsr = INITIAL_MXCSR;
2346  // we can't use StubRoutines::addr_mxcsr_std()
2347  // because in Win64 mxcsr is not saved there
2348  if (MxCsr != ctx->MxCsr) {
2349    ctx->MxCsr = MxCsr;
2350    return EXCEPTION_CONTINUE_EXECUTION;
2351  }
2352#endif // !_WIN64
2353
2354  return EXCEPTION_CONTINUE_SEARCH;
2355}
2356
2357static inline void report_error(Thread* t, DWORD exception_code,
2358                                address addr, void* siginfo, void* context) {
2359  VMError::report_and_die(t, exception_code, addr, siginfo, context);
2360
2361  // If UseOsErrorReporting, this will return here and save the error file
2362  // somewhere where we can find it in the minidump.
2363}
2364
2365bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2366        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2367  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2368  address addr = (address) exceptionRecord->ExceptionInformation[1];
2369  if (Interpreter::contains(pc)) {
2370    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2371    if (!fr->is_first_java_frame()) {
2372      assert(fr->safe_for_sender(thread), "Safety check");
2373      *fr = fr->java_sender();
2374    }
2375  } else {
2376    // More complex case: the pc is not in the interpreter, so check the code cache for compiled code
2377    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2378    CodeBlob* cb = CodeCache::find_blob(pc);
2379    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2380      // Not sure where the pc points to; fall back to the default
2381      // stack overflow handling.
2382      return false;
2383    } else {
2384      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2385      // in compiled code, the stack banging is performed just after the return pc
2386      // has been pushed on the stack
2387      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2388      if (!fr->is_java_frame()) {
2389        assert(fr->safe_for_sender(thread), "Safety check");
2390        *fr = fr->java_sender();
2391      }
2392    }
2393  }
2394  assert(fr->is_java_frame(), "Safety check");
2395  return true;
2396}
2397
2398//-----------------------------------------------------------------------------
2399LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2400  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2401  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2402#ifdef _M_IA64
2403  // On Itanium, we need the "precise pc", which has the slot number coded
2404  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
2405  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
2406  // Convert the pc to "Unix format", which has the slot number coded
2407  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
2408  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
2409  // information is saved in the Unix format.
2410  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
2411#else
2412  #ifdef _M_AMD64
2413  address pc = (address) exceptionInfo->ContextRecord->Rip;
2414  #else
2415  address pc = (address) exceptionInfo->ContextRecord->Eip;
2416  #endif
2417#endif
2418  Thread* t = Thread::current_or_null_safe();
2419
2420  // Handle SafeFetch32 and SafeFetchN exceptions.
2421  if (StubRoutines::is_safefetch_fault(pc)) {
2422    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2423  }
2424
2425#ifndef _WIN64
2426  // Execution protection violation - win32 running on AMD64 only
2427  // Handled first to avoid misdiagnosis as a "normal" access violation;
2428  // This is safe to do because we have a new/unique ExceptionInformation
2429  // code for this condition.
2430  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2431    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2432    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2433    address addr = (address) exceptionRecord->ExceptionInformation[1];
2434
2435    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2436      int page_size = os::vm_page_size();
2437
2438      // Make sure the pc and the faulting address are sane.
2439      //
2440      // If an instruction spans a page boundary, and the page containing
2441      // the beginning of the instruction is executable but the following
2442      // page is not, the pc and the faulting address might be slightly
2443      // different - we still want to unguard the 2nd page in this case.
2444      //
2445      // 15 bytes seems to be a (very) safe value for max instruction size.
2446      bool pc_is_near_addr =
2447        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2448      bool instr_spans_page_boundary =
2449        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
2450                         (intptr_t) page_size) > 0);
2451
2452      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2453        static volatile address last_addr =
2454          (address) os::non_memory_address_word();
2455
2456        // In conservative mode, don't unguard unless the address is in the VM
2457        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2458            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2459
2460          // Set memory to RWX and retry
2461          address page_start =
2462            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
2463          bool res = os::protect_memory((char*) page_start, page_size,
2464                                        os::MEM_PROT_RWX);
2465
2466          log_debug(os)("Execution protection violation "
2467                        "at " INTPTR_FORMAT
2468                        ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2469                        p2i(page_start), (res ? "success" : os::strerror(errno)));
2470
2471          // Set last_addr so if we fault again at the same address, we don't
2472          // end up in an endless loop.
2473          //
2474          // There are two potential complications here.  Two threads trapping
2475          // at the same address at the same time could cause one of the
2476          // threads to think it already unguarded, and abort the VM.  Likely
2477          // very rare.
2478          //
2479          // The other race involves two threads alternately trapping at
2480          // different addresses and failing to unguard the page, resulting in
2481          // an endless loop.  This condition is probably even more unlikely
2482          // than the first.
2483          //
2484          // Although both cases could be avoided by using locks or thread
2485          // local last_addr, these solutions are unnecessary complication:
2486          // this handler is a best-effort safety net, not a complete solution.
2487          // It is disabled by default and should only be used as a workaround
2488          // in case we missed any no-execute-unsafe VM code.
2489
2490          last_addr = addr;
2491
2492          return EXCEPTION_CONTINUE_EXECUTION;
2493        }
2494      }
2495
2496      // Last unguard failed or not unguarding
2497      tty->print_raw_cr("Execution protection violation");
2498      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2499                   exceptionInfo->ContextRecord);
2500      return EXCEPTION_CONTINUE_SEARCH;
2501    }
2502  }
2503#endif // _WIN64
2504
2505  // Check to see if we caught the safepoint code in the
2506  // process of write protecting the memory serialization page.
2507  // It write enables the page immediately after protecting it
2508  // so just return.
2509  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2510    JavaThread* thread = (JavaThread*) t;
2511    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2512    address addr = (address) exceptionRecord->ExceptionInformation[1];
2513    if (os::is_memory_serialize_page(thread, addr)) {
2514      // Block current thread until the memory serialize page permission restored.
2515      os::block_on_serialize_page_trap();
2516      return EXCEPTION_CONTINUE_EXECUTION;
2517    }
2518  }
2519
2520  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2521      VM_Version::is_cpuinfo_segv_addr(pc)) {
2522    // Verify that the OS saves/restores AVX registers.
2523    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2524  }
2525
2526  if (t != NULL && t->is_Java_thread()) {
2527    JavaThread* thread = (JavaThread*) t;
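    // Remember whether the fault occurred while executing Java code; several
    // cases below only install a continuation stub in that state.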
2528    bool in_java = thread->thread_state() == _thread_in_Java;
2529
2530    // Handle potential stack overflows up front.
2531    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2532#ifdef _M_IA64
2533      // Use guard page for register stack.
2534      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2535      address addr = (address) exceptionRecord->ExceptionInformation[1];
2536      // Check for a register stack overflow on Itanium
2537      if (thread->addr_inside_register_stack_red_zone(addr)) {
2538        // Fatal red zone violation happens if the Java program
2539        // catches a StackOverflow error and does so much processing
2540        // that it runs beyond the unprotected yellow guard zone. As
2541        // a result, we are out of here.
2542        fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
2543      } else if(thread->addr_inside_register_stack(addr)) {
2544        // Disable the yellow zone which sets the state that
2545        // we've got a stack overflow problem.
2546        if (thread->stack_yellow_reserved_zone_enabled()) {
2547          thread->disable_stack_yellow_reserved_zone();
2548        }
2549        // Give us some room to process the exception.
2550        thread->disable_register_stack_guard();
2551        // Tracing with +Verbose.
2552        if (Verbose) {
2553          tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
2554          tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
2555          tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
2556          tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
2557                        thread->register_stack_base(),
2558                        thread->register_stack_base() + thread->stack_size());
2559        }
2560
2561        // Reguard the permanent register stack red zone just to be sure.
2562        // We saw Windows silently disabling this without telling us.
2563        thread->enable_register_stack_red_zone();
2564
2565        return Handle_Exception(exceptionInfo,
2566                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2567      }
2568#endif
2569      if (thread->stack_guards_enabled()) {
2570        if (in_java) {
2571          frame fr;
2572          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2573          address addr = (address) exceptionRecord->ExceptionInformation[1];
2574          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2575            assert(fr.is_java_frame(), "Must be a Java frame");
2576            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2577          }
2578        }
2579        // Yellow zone violation.  The o/s has unprotected the first yellow
2580        // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone
2581        // to update the enabled status, even if the zone contains only one page.
2582        thread->disable_stack_yellow_reserved_zone();
2583        // If not in java code, return and hope for the best.
2584        return in_java
2585            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2586            :  EXCEPTION_CONTINUE_EXECUTION;
2587      } else {
2588        // Fatal red zone violation.
2589        thread->disable_stack_red_zone();
2590        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2591        report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2592                      exceptionInfo->ContextRecord);
2593        return EXCEPTION_CONTINUE_SEARCH;
2594      }
2595    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2596      // Either stack overflow or null pointer exception.
2597      if (in_java) {
2598        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2599        address addr = (address) exceptionRecord->ExceptionInformation[1];
2600        address stack_end = thread->stack_end();
2601        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2602          // Stack overflow.
2603          assert(!os::uses_stack_guard_pages(),
2604                 "should be caught by red zone code above.");
2605          return Handle_Exception(exceptionInfo,
2606                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2607        }
2608        // Check for safepoint polling and implicit null pointers.
2609        // We only expect null pointers in the stubs (vtable stubs);
2610        // the rest are checked explicitly now.
2611        CodeBlob* cb = CodeCache::find_blob(pc);
2612        if (cb != NULL) {
2613          if (os::is_poll_address(addr)) {
2614            address stub = SharedRuntime::get_poll_stub(pc);
2615            return Handle_Exception(exceptionInfo, stub);
2616          }
2617        }
2618        {
2619#ifdef _WIN64
2620          // If it's a legal stack address, map the entire region in
2621          //
2622          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2623          address addr = (address) exceptionRecord->ExceptionInformation[1];
2624          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2625            addr = (address)((uintptr_t)addr &
2626                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
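            // addr is now page-aligned; commit the range from addr up to the stack base.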
2627            os::commit_memory((char *)addr, thread->stack_base() - addr,
2628                              !ExecMem);
2629            return EXCEPTION_CONTINUE_EXECUTION;
2630          } else
2631#endif
2632          {
2633            // Null pointer exception.
2634#ifdef _M_IA64
2635            // Process implicit null checks in compiled code. Note: Implicit null checks
2636            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
2637            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
2638              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
2639              // Handle implicit null check in UEP method entry
2640              if (cb && (cb->is_frame_complete_at(pc) ||
2641                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
2642                if (Verbose) {
2643                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
2644                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
2645                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
2646                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
2647                                *(bundle_start + 1), *bundle_start);
2648                }
2649                return Handle_Exception(exceptionInfo,
2650                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
2651              }
2652            }
2653
2654            // Implicit null checks were processed above.  Hence, we should not reach
2655            // here in the usual case => die!
2656            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
2657            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2658                         exceptionInfo->ContextRecord);
2659            return EXCEPTION_CONTINUE_SEARCH;
2660
2661#else // !IA64
2662
2663            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
2664              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2665              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2666            }
2667            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2668                         exceptionInfo->ContextRecord);
2669            return EXCEPTION_CONTINUE_SEARCH;
2670#endif
2671          }
2672        }
2673      }
2674
2675#ifdef _WIN64
2676      // Special care for fast JNI field accessors.
2677      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2678      // in and the heap gets shrunk before the field access.
2679      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2680        address addr = JNI_FastGetField::find_slowcase_pc(pc);
2681        if (addr != (address)-1) {
2682          return Handle_Exception(exceptionInfo, addr);
2683        }
2684      }
2685#endif
2686
2687      // Stack overflow or null pointer exception in native code.
2688      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2689                   exceptionInfo->ContextRecord);
2690      return EXCEPTION_CONTINUE_SEARCH;
2691    } // /EXCEPTION_ACCESS_VIOLATION
2692    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2693#if defined _M_IA64
2694    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2695              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2696      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);
2697
2698      // Compiled method patched to be non-entrant? The following conditions must apply:
2699      // 1. must be first instruction in bundle
2700      // 2. must be a break instruction with appropriate code
2701      if ((((uint64_t) pc & 0x0F) == 0) &&
2702          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
2703        return Handle_Exception(exceptionInfo,
2704                                (address)SharedRuntime::get_handle_wrong_method_stub());
2705      }
2706    } // /EXCEPTION_ILLEGAL_INSTRUCTION
2707#endif
2708
2709
2710    if (in_java) {
2711      switch (exception_code) {
2712      case EXCEPTION_INT_DIVIDE_BY_ZERO:
2713        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2714
2715      case EXCEPTION_INT_OVERFLOW:
2716        return Handle_IDiv_Exception(exceptionInfo);
2717
2718      } // switch
2719    }
2720    if (((thread->thread_state() == _thread_in_Java) ||
2721         (thread->thread_state() == _thread_in_native)) &&
2722         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2723      LONG result = Handle_FLT_Exception(exceptionInfo);
2724      if (result == EXCEPTION_CONTINUE_EXECUTION) return result;
2725    }
2726  }
2727
2728  if (exception_code != EXCEPTION_BREAKPOINT) {
2729    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2730                 exceptionInfo->ContextRecord);
2731  }
2732  return EXCEPTION_CONTINUE_SEARCH;
2733}
2734
2735#ifndef _WIN64
2736// Special care for fast JNI accessors.
2737// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2738// the heap gets shrunk before the field access.
2739// Need to install our own structured exception handler since native code may
2740// install its own.
2741LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2742  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2743  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2744    address pc = (address) exceptionInfo->ContextRecord->Eip;
2745    address addr = JNI_FastGetField::find_slowcase_pc(pc);
2746    if (addr != (address)-1) {
2747      return Handle_Exception(exceptionInfo, addr);
2748    }
2749  }
2750  return EXCEPTION_CONTINUE_SEARCH;
2751}
2752
2753#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2754  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2755                                                     jobject obj,           \
2756                                                     jfieldID fieldID) {    \
2757    __try {                                                                 \
2758      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2759                                                                 obj,       \
2760                                                                 fieldID);  \
2761    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2762                                              _exception_info())) {         \
2763    }                                                                       \
2764    return 0;                                                               \
2765  }
2766
2767DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2768DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2769DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2770DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2771DEFINE_FAST_GETFIELD(jint,     int,    Int)
2772DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2773DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2774DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2775
2776address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2777  switch (type) {
2778  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2779  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2780  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2781  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2782  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2783  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2784  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2785  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2786  default:        ShouldNotReachHere();
2787  }
2788  return (address)-1;
2789}
2790#endif
2791
2792// Virtual Memory
2793
2794int os::vm_page_size() { return os::win32::vm_page_size(); }
2795int os::vm_allocation_granularity() {
2796  return os::win32::vm_allocation_granularity();
2797}
2798
2799// Windows large page support is available on Windows 2003. In order to use
2800// large page memory, the administrator must first assign additional privilege
2801// to the user:
2802//   + select Control Panel -> Administrative Tools -> Local Security Policy
2803//   + select Local Policies -> User Rights Assignment
2804//   + double click "Lock pages in memory", add users and/or groups
2805//   + reboot
2806// Note the above steps are needed for administrator as well, as administrators
2807// by default do not have the privilege to lock pages in memory.
2808//
2809// Note about Windows 2003: although the API supports committing large page
2810// memory on a page-by-page basis and VirtualAlloc() returns success under this
2811// scenario, I found through experimentation that it only uses large pages if the entire
2812// memory region is reserved and committed in a single VirtualAlloc() call.
2813// This makes Windows large page support more or less like Solaris ISM, in
2814// that the entire heap must be committed upfront. This probably will change
2815// in the future, if so the code below needs to be revisited.
2816
2817#ifndef MEM_LARGE_PAGES
2818  #define MEM_LARGE_PAGES 0x20000000
2819#endif
2820
2821static HANDLE    _hProcess;
2822static HANDLE    _hToken;
2823
2824// Container for NUMA node list info
2825class NUMANodeListHolder {
2826 private:
2827  int *_numa_used_node_list;  // allocated below
2828  int _numa_used_node_count;
2829
2830  void free_node_list() {
2831    if (_numa_used_node_list != NULL) {
2832      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2833    }
2834  }
2835
2836 public:
2837  NUMANodeListHolder() {
2838    _numa_used_node_count = 0;
2839    _numa_used_node_list = NULL;
2840    // do rest of initialization in build routine (after function pointers are set up)
2841  }
2842
2843  ~NUMANodeListHolder() {
2844    free_node_list();
2845  }
2846
2847  bool build() {
2848    DWORD_PTR proc_aff_mask;
2849    DWORD_PTR sys_aff_mask;
2850    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2851    ULONG highest_node_number;
2852    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2853    free_node_list();
2854    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2855    for (unsigned int i = 0; i <= highest_node_number; i++) {
2856      ULONGLONG proc_mask_numa_node;
2857      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2858      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2859        _numa_used_node_list[_numa_used_node_count++] = i;
2860      }
2861    }
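    // Interleaving is only worthwhile if the process's affinity mask spans
    // more than one NUMA node.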
2862    return (_numa_used_node_count > 1);
2863  }
2864
2865  int get_count() { return _numa_used_node_count; }
2866  int get_node_list_entry(int n) {
2867    // for indexes out of range, returns -1
2868    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2869  }
2870
2871} numa_node_list_holder;
2872
2873
2874
2875static size_t _large_page_size = 0;
2876
2877static bool request_lock_memory_privilege() {
2878  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2879                          os::current_process_id());
2880
2881  LUID luid;
2882  if (_hProcess != NULL &&
2883      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2884      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2885
2886    TOKEN_PRIVILEGES tp;
2887    tp.PrivilegeCount = 1;
2888    tp.Privileges[0].Luid = luid;
2889    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2890
2891    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2892    // privilege. Check GetLastError() too. See MSDN document.
2893    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2894        (GetLastError() == ERROR_SUCCESS)) {
2895      return true;
2896    }
2897  }
2898
2899  return false;
2900}
2901
2902static void cleanup_after_large_page_init() {
2903  if (_hProcess) CloseHandle(_hProcess);
2904  _hProcess = NULL;
2905  if (_hToken) CloseHandle(_hToken);
2906  _hToken = NULL;
2907}
2908
2909static bool numa_interleaving_init() {
2910  bool success = false;
2911  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2912
2913  // print a warning if UseNUMAInterleaving flag is specified on command line
2914  bool warn_on_failure = use_numa_interleaving_specified;
2915#define WARN(msg) if (warn_on_failure) { warning(msg); }
2916
2917  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2918  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2919  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);
2920
2921  if (numa_node_list_holder.build()) {
2922    if (log_is_enabled(Debug, os, cpu)) {
2923      Log(os, cpu) log;
2924      log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2925      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2926        log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2927      }
2928    }
2929    success = true;
2930  } else {
2931    WARN("Process does not cover multiple NUMA nodes.");
2932  }
2933  if (!success) {
2934    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2935  }
2936  return success;
2937#undef WARN
2938}
2939
2940// This routine is used whenever we need to reserve a contiguous VA range,
2941// but must make separate VirtualAlloc calls for each piece of the range.
2942// Reasons for doing this:
2943//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2944//  * UseNUMAInterleaving requires a separate node for each piece
2945static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2946                                         DWORD prot,
2947                                         bool should_inject_error = false) {
2948  char * p_buf;
2949  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2950  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2951  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
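  // Each allocation below covers one chunk: a NUMA interleave granule when
  // interleaving, otherwise one page_size unit (large page or allocation granularity).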
2952
2953  // First reserve enough address space in advance, since we want to be
2954  // able to break a single contiguous virtual address range into multiple
2955  // large page commits, but WS2003 does not allow reserving large page space.
2956  // So we just use 4K pages for the reserve; this gives us a legal contiguous
2957  // address space. Then we deallocate that reservation and re-allocate it
2958  // using large pages.
2959  const size_t size_of_reserve = bytes + chunk_size;
2960  if (bytes > size_of_reserve) {
2961    // Overflowed.
2962    return NULL;
2963  }
2964  p_buf = (char *) VirtualAlloc(addr,
2965                                size_of_reserve,  // size of Reserve
2966                                MEM_RESERVE,
2967                                PAGE_READWRITE);
2968  // If reservation failed, return NULL
2969  if (p_buf == NULL) return NULL;
2970  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2971  os::release_memory(p_buf, bytes + chunk_size);
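  // The reservation above has been released; the loop below re-acquires the
  // range chunk by chunk at fixed addresses. If anything else grabs part of
  // the range in the meantime, the corresponding VirtualAlloc call fails and
  // we clean up and return NULL.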
2972
2973  // We still need to round up to a page boundary (in case we are using large pages),
2974  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
2975  // instead we handle this in the bytes_to_rq computation below.
2976  p_buf = (char *) align_size_up((size_t)p_buf, page_size);
2977
2978  // now go through and allocate one chunk at a time until all bytes are
2979  // allocated
2980  size_t  bytes_remaining = bytes;
2981  // An overflow of align_size_up() would have been caught above
2982  // in the calculation of size_of_reserve.
2983  char * next_alloc_addr = p_buf;
2984  HANDLE hProc = GetCurrentProcess();
2985
2986#ifdef ASSERT
2987  // Variable for the failure injection
2988  long ran_num = os::random();
2989  size_t fail_after = ran_num % bytes;
2990#endif
2991
2992  int count=0;
2993  while (bytes_remaining) {
2994    // select bytes_to_rq to get to the next chunk_size boundary
2995
2996    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2997    // Note allocate and commit
2998    char * p_new;
2999
3000#ifdef ASSERT
3001    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3002#else
3003    const bool inject_error_now = false;
3004#endif
3005
3006    if (inject_error_now) {
3007      p_new = NULL;
3008    } else {
3009      if (!UseNUMAInterleaving) {
3010        p_new = (char *) VirtualAlloc(next_alloc_addr,
3011                                      bytes_to_rq,
3012                                      flags,
3013                                      prot);
3014      } else {
3015        // get the next node to use from the used_node_list
3016        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3017        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3018        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3019      }
3020    }
3021
3022    if (p_new == NULL) {
3023      // Free any allocated pages
3024      if (next_alloc_addr > p_buf) {
3025        // Some memory was committed so release it.
3026        size_t bytes_to_release = bytes - bytes_remaining;
3027        // NMT has yet to record any individual blocks, so it
3028        // needs to create a dummy 'reserve' record to match
3029        // the release.
3030        MemTracker::record_virtual_memory_reserve((address)p_buf,
3031                                                  bytes_to_release, CALLER_PC);
3032        os::release_memory(p_buf, bytes_to_release);
3033      }
3034#ifdef ASSERT
3035      if (should_inject_error) {
3036        log_develop_debug(pagesize)("Reserving pages individually failed.");
3037      }
3038#endif
3039      return NULL;
3040    }
3041
3042    bytes_remaining -= bytes_to_rq;
3043    next_alloc_addr += bytes_to_rq;
3044    count++;
3045  }
3046  // Although the memory is allocated individually, it is returned as one.
3047  // NMT records it as one block.
3048  if ((flags & MEM_COMMIT) != 0) {
3049    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3050  } else {
3051    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3052  }
3053
3054  // made it this far, success
3055  return p_buf;
3056}
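
// Illustrative walk-through of the chunk loop above (editorial sketch, not VM code).
// Assuming chunk_size == NUMAInterleaveGranularity == 2M, bytes == 5M, p_buf happens
// to be chunk-aligned, and numa_node_list_holder contains the two nodes {0, 1}:
//
//   iteration 0: bytes_to_rq = 2M, VirtualAllocExNuma(..., node 0)
//   iteration 1: bytes_to_rq = 2M, VirtualAllocExNuma(..., node 1)
//   iteration 2: bytes_to_rq = 1M, VirtualAllocExNuma(..., node 0)
//
// so successive chunks are spread round-robin across the configured NUMA nodes.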
3057
3058
3059
3060void os::large_page_init() {
3061  if (!UseLargePages) return;
3062
3063  // print a warning if any large page related flag is specified on command line
3064  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3065                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3066  bool success = false;
3067
3068#define WARN(msg) if (warn_on_failure) { warning(msg); }
3069  if (request_lock_memory_privilege()) {
3070    size_t s = GetLargePageMinimum();
3071    if (s) {
3072#if defined(IA32) || defined(AMD64)
3073      if (s > 4*M || LargePageSizeInBytes > 4*M) {
3074        WARN("JVM cannot use large pages bigger than 4mb.");
3075      } else {
3076#endif
3077        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
3078          _large_page_size = LargePageSizeInBytes;
3079        } else {
3080          _large_page_size = s;
3081        }
3082        success = true;
3083#if defined(IA32) || defined(AMD64)
3084      }
3085#endif
3086    } else {
3087      WARN("Large page is not supported by the processor.");
3088    }
3089  } else {
3090    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3091  }
3092#undef WARN
3093
3094  const size_t default_page_size = (size_t) vm_page_size();
3095  if (success && _large_page_size > default_page_size) {
3096    _page_sizes[0] = _large_page_size;
3097    _page_sizes[1] = default_page_size;
3098    _page_sizes[2] = 0;
3099  }
3100
3101  cleanup_after_large_page_init();
3102  UseLargePages = success;
3103}
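
// Worked example of the size selection above (editorial note): if GetLargePageMinimum()
// returns 2M and -XX:LargePageSizeInBytes=4M is specified, 4M is a multiple of 2M so
// _large_page_size becomes 4M; with the default LargePageSizeInBytes of 0 it stays at
// the 2M minimum. On IA32/AMD64 any size above 4M is rejected with a warning and
// UseLargePages is turned off.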
3104
3105// On win32, one cannot release just a part of reserved memory, it's an
3106// all or nothing deal.  When we split a reservation, we must break the
3107// reservation into two reservations.
3108void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3109                                  bool realloc) {
3110  if (size > 0) {
3111    release_memory(base, size);
3112    if (realloc) {
3113      reserve_memory(split, base);
3114    }
3115    if (size != split) {
3116      reserve_memory(size - split, base + split);
3117    }
3118  }
3119}
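
// Editorial example of the split above: splitting a 4M reservation at split == 1M
// releases the whole 4M range and then re-reserves [base, base + 1M) (only when
// realloc is true) and [base + 1M, base + 4M) as two independent reservations that
// can later be released separately.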
3120
3121// Multiple threads can race in this code, but unlike POSIX-like OSes it is not
3122// possible to unmap small sections of virtual space to get the requested alignment.
3123// Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3124char* os::reserve_memory_aligned(size_t size, size_t alignment) {
3125  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3126         "Alignment must be a multiple of allocation granularity (page size)");
3127  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3128
3129  size_t extra_size = size + alignment;
3130  assert(extra_size >= size, "overflow, size is too large to allow alignment");
3131
3132  char* aligned_base = NULL;
3133
3134  do {
3135    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
3136    if (extra_base == NULL) {
3137      return NULL;
3138    }
3139    // Do manual alignment
3140    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
3141
3142    os::release_memory(extra_base, extra_size);
3143
3144    aligned_base = os::reserve_memory(size, aligned_base);
3145
3146  } while (aligned_base == NULL);
3147
3148  return aligned_base;
3149}
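
// Editorial sketch of the retry scheme above, assuming size == alignment == 1M and a
// 64K allocation granularity:
//
//   char* extra = os::reserve_memory(1M + 1M, NULL, 1M);    // over-reserve 2M
//   char* base  = (char*)align_size_up((uintptr_t)extra, 1M);
//   os::release_memory(extra, 2M);                          // give the whole range back
//   base = os::reserve_memory(1M, base);                    // re-take only the aligned part
//
// Between the release and the re-reserve another thread may grab the range, in which
// case os::reserve_memory() returns NULL and the loop simply tries again.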
3150
3151char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3152  assert((size_t)addr % os::vm_allocation_granularity() == 0,
3153         "reserve alignment");
3154  assert(bytes % os::vm_page_size() == 0, "reserve page size");
3155  char* res;
3156  // note that if UseLargePages is on, all the areas that require interleaving
3157  // will go through reserve_memory_special rather than through here.
3158  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3159  if (!use_individual) {
3160    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3161  } else {
3162    elapsedTimer reserveTimer;
3163    if (Verbose && PrintMiscellaneous) reserveTimer.start();
3164    // in numa interleaving, we have to allocate pages individually
3165    // (well really chunks of NUMAInterleaveGranularity size)
3166    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3167    if (res == NULL) {
3168      warning("NUMA page allocation failed");
3169    }
3170    if (Verbose && PrintMiscellaneous) {
3171      reserveTimer.stop();
3172      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3173                    reserveTimer.milliseconds(), reserveTimer.ticks());
3174    }
3175  }
3176  assert(res == NULL || addr == NULL || addr == res,
3177         "Unexpected address from reserve.");
3178
3179  return res;
3180}
3181
3182// Reserve memory at an arbitrary address, only if that area is
3183// available (and not reserved for something else).
3184char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3185  // Windows os::reserve_memory() fails if the requested address range is
3186  // not available.
3187  return reserve_memory(bytes, requested_addr);
3188}
3189
3190size_t os::large_page_size() {
3191  return _large_page_size;
3192}
3193
3194bool os::can_commit_large_page_memory() {
3195  // Windows only uses large page memory when the entire region is reserved
3196  // and committed in a single VirtualAlloc() call. This may change in the
3197  // future, but with Windows 2003 it's not possible to commit on demand.
3198  return false;
3199}
3200
3201bool os::can_execute_large_page_memory() {
3202  return true;
3203}
3204
3205char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3206                                 bool exec) {
3207  assert(UseLargePages, "only for large pages");
3208
3209  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3210    return NULL; // Fallback to small pages.
3211  }
3212
3213  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3214  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3215
3216  // with large pages, there are two cases where we need to use Individual Allocation
3217  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3218  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3219  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3220    log_debug(pagesize)("Reserving large pages individually.");
3221
3222    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3223    if (p_buf == NULL) {
3224      // give an appropriate warning message
3225      if (UseNUMAInterleaving) {
3226        warning("NUMA large page allocation failed, UseLargePages flag ignored");
3227      }
3228      if (UseLargePagesIndividualAllocation) {
3229        warning("Individually allocated large pages failed, "
3230                "use -XX:-UseLargePagesIndividualAllocation to turn off");
3231      }
3232      return NULL;
3233    }
3234
3235    return p_buf;
3236
3237  } else {
3238    log_debug(pagesize)("Reserving large pages in a single large chunk.");
3239
3240    // normal policy just allocate it all at once
3241    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3242    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3243    if (res != NULL) {
3244      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3245    }
3246
3247    return res;
3248  }
3249}
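
// Editorial note on the early bail-out above: with a 2M large page size, a 10M request
// (a multiple of 2M) with alignment <= 2M is accepted, whereas a request of 10M + 4K is
// not large-page aligned and returns NULL so that the caller falls back to small pages.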
3250
3251bool os::release_memory_special(char* base, size_t bytes) {
3252  assert(base != NULL, "Sanity check");
3253  return release_memory(base, bytes);
3254}
3255
3256void os::print_statistics() {
3257}
3258
3259static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3260  int err = os::get_last_error();
3261  char buf[256];
3262  size_t buf_len = os::lasterror(buf, sizeof(buf));
3263  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3264          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3265          exec, buf_len != 0 ? buf : "<no_error_string>", err);
3266}
3267
3268bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3269  if (bytes == 0) {
3270    // Don't bother the OS with noops.
3271    return true;
3272  }
3273  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3274  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3275  // Don't attempt to print anything if the OS call fails. We're
3276  // probably low on resources, so the print itself may cause crashes.
3277
3278  // unless we have NUMAInterleaving enabled, the range of a commit
3279  // is always within a reserve covered by a single VirtualAlloc
3280  // in that case we can just do a single commit for the requested size
3281  if (!UseNUMAInterleaving) {
3282    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3283      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3284      return false;
3285    }
3286    if (exec) {
3287      DWORD oldprot;
3288      // Windows doc says to use VirtualProtect to get execute permissions
3289      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3290        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3291        return false;
3292      }
3293    }
3294    return true;
3295  } else {
3296
3297    // when NUMAInterleaving is enabled, the commit might cover a range that
3298    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3299    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3300    // returns represents the number of bytes that can be committed in one step.
3301    size_t bytes_remaining = bytes;
3302    char * next_alloc_addr = addr;
3303    while (bytes_remaining > 0) {
3304      MEMORY_BASIC_INFORMATION alloc_info;
3305      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3306      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3307      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3308                       PAGE_READWRITE) == NULL) {
3309        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3310                                            exec);)
3311        return false;
3312      }
3313      if (exec) {
3314        DWORD oldprot;
3315        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3316                            PAGE_EXECUTE_READWRITE, &oldprot)) {
3317          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3318                                              exec);)
3319          return false;
3320        }
3321      }
3322      bytes_remaining -= bytes_to_rq;
3323      next_alloc_addr += bytes_to_rq;
3324    }
3325  }
3326  // if we made it this far, return true
3327  return true;
3328}
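
// Editorial note on the interleaved branch above: each iteration commits at most
// RegionSize bytes as reported by VirtualQuery, so (assuming a 2M
// NUMAInterleaveGranularity) a 3M commit that spans two separately reserved 2M chunks
// is performed as at least two MEM_COMMIT calls rather than one.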
3329
3330bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3331                          bool exec) {
3332  // alignment_hint is ignored on this OS
3333  return pd_commit_memory(addr, size, exec);
3334}
3335
3336void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3337                                  const char* mesg) {
3338  assert(mesg != NULL, "mesg must be specified");
3339  if (!pd_commit_memory(addr, size, exec)) {
3340    warn_fail_commit_memory(addr, size, exec);
3341    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3342  }
3343}
3344
3345void os::pd_commit_memory_or_exit(char* addr, size_t size,
3346                                  size_t alignment_hint, bool exec,
3347                                  const char* mesg) {
3348  // alignment_hint is ignored on this OS
3349  pd_commit_memory_or_exit(addr, size, exec, mesg);
3350}
3351
3352bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3353  if (bytes == 0) {
3354    // Don't bother the OS with noops.
3355    return true;
3356  }
3357  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3358  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3359  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3360}
3361
3362bool os::pd_release_memory(char* addr, size_t bytes) {
3363  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3364}
3365
3366bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3367  return os::commit_memory(addr, size, !ExecMem);
3368}
3369
3370bool os::remove_stack_guard_pages(char* addr, size_t size) {
3371  return os::uncommit_memory(addr, size);
3372}
3373
3374static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3375  uint count = 0;
3376  bool ret = false;
3377  size_t bytes_remaining = bytes;
3378  char * next_protect_addr = addr;
3379
3380  // Use VirtualQuery() to get the chunk size.
3381  while (bytes_remaining) {
3382    MEMORY_BASIC_INFORMATION alloc_info;
3383    if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3384      return false;
3385    }
3386
3387    size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3388    // We used a different API in allocate_pages_individually() depending on UseNUMAInterleaving,
3389    // but we don't need to distinguish the cases here: both are protected by the same API.
3390    ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3391    if (!ret) {
3392      warning("Failed protecting pages individually for chunk #%u", count);
3393      return false;
3394    }
3395
3396    bytes_remaining -= bytes_to_protect;
3397    next_protect_addr += bytes_to_protect;
3398    count++;
3399  }
3400  return ret;
3401}
3402
3403// Set protections specified
3404bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3405                        bool is_committed) {
3406  unsigned int p = 0;
3407  switch (prot) {
3408  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3409  case MEM_PROT_READ: p = PAGE_READONLY; break;
3410  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3411  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3412  default:
3413    ShouldNotReachHere();
3414  }
3415
3416  DWORD old_status;
3417
3418  // Strangely enough, on Win32 one can change protection only for committed
3419  // memory. Not a big deal anyway, as bytes is less than or equal to 64K here.
3420  if (!is_committed) {
3421    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3422                          "cannot commit protection page");
3423  }
3424  // One cannot use os::guard_memory() here, as on Win32 guard pages
3425  // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3426  //
3427  // Pages in the region become guard pages. Any attempt to access a guard page
3428  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3429  // the guard page status. Guard pages thus act as a one-time access alarm.
3430  bool ret;
3431  if (UseNUMAInterleaving) {
3432    // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3433    // so we must protect the chunks individually.
3434    ret = protect_pages_individually(addr, bytes, p, &old_status);
3435  } else {
3436    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3437  }
3438#ifdef ASSERT
3439  if (!ret) {
3440    int err = os::get_last_error();
3441    char buf[256];
3442    size_t buf_len = os::lasterror(buf, sizeof(buf));
3443    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3444          ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3445          buf_len != 0 ? buf : "<no_error_string>", err);
3446  }
3447#endif
3448  return ret;
3449}
3450
3451bool os::guard_memory(char* addr, size_t bytes) {
3452  DWORD old_status;
3453  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3454}
3455
3456bool os::unguard_memory(char* addr, size_t bytes) {
3457  DWORD old_status;
3458  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3459}
3460
3461void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3462void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3463void os::numa_make_global(char *addr, size_t bytes)    { }
3464void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3465bool os::numa_topology_changed()                       { return false; }
3466size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3467int os::numa_get_group_id()                            { return 0; }
3468size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3469  if (numa_node_list_holder.get_count() == 0 && size > 0) {
3470    // Provide an answer for UMA systems
3471    ids[0] = 0;
3472    return 1;
3473  } else {
3474    // check for size bigger than actual groups_num
3475    size = MIN2(size, numa_get_groups_num());
3476    for (int i = 0; i < (int)size; i++) {
3477      ids[i] = numa_node_list_holder.get_node_list_entry(i);
3478    }
3479    return size;
3480  }
3481}
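
// Editorial example: on a system where numa_node_list_holder contains nodes {0, 2},
// numa_get_leaf_groups(ids, 4) fills ids[0] = 0 and ids[1] = 2 and returns 2; on a UMA
// machine (empty holder) it reports the single pseudo-group {0}.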
3482
3483bool os::get_page_info(char *start, page_info* info) {
3484  return false;
3485}
3486
3487char *os::scan_pages(char *start, char* end, page_info* page_expected,
3488                     page_info* page_found) {
3489  return end;
3490}
3491
3492char* os::non_memory_address_word() {
3493  // Must never look like an address returned by reserve_memory,
3494  // even in its subfields (as defined by the CPU immediate fields,
3495  // if the CPU splits constants across multiple instructions).
3496  return (char*)-1;
3497}
3498
3499#define MAX_ERROR_COUNT 100
3500#define SYS_THREAD_ERROR 0xffffffffUL
3501
3502void os::pd_start_thread(Thread* thread) {
3503  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3504  // Returns previous suspend state:
3505  // 0:  Thread was not suspended
3506  // 1:  Thread is running now
3507  // >1: Thread is still suspended.
3508  assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3509}
3510
3511class HighResolutionInterval : public CHeapObj<mtThread> {
3512  // The default timer resolution seems to be 10 milliseconds.
3513  // (Where is this written down?)
3514  // If someone wants to sleep for only a fraction of the default,
3515  // then we set the timer resolution down to 1 millisecond for
3516  // the duration of their interval.
3517  // We carefully set the resolution back, since otherwise we
3518  // seem to incur an overhead (3%?) that we don't need.
3519  // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
3520  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3521  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3522  // timeBeginPeriod() if the relative error exceeded some threshold.
3523  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3524  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
3525  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3526  // resolution timers running.
3527 private:
3528  jlong resolution;
3529 public:
3530  HighResolutionInterval(jlong ms) {
3531    resolution = ms % 10L;
3532    if (resolution != 0) {
3533      MMRESULT result = timeBeginPeriod(1L);
3534    }
3535  }
3536  ~HighResolutionInterval() {
3537    if (resolution != 0) {
3538      MMRESULT result = timeEndPeriod(1L);
3539    }
3540    resolution = 0L;
3541  }
3542};
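
// Illustrative usage of HighResolutionInterval (editorial sketch mirroring os::sleep()
// below):
//
//   HighResolutionInterval hri(3);   // 3 % 10 != 0  -> timeBeginPeriod(1)
//   Sleep(3);                        // short sleep at 1 ms timer resolution
//   // hri's destructor calls timeEndPeriod(1), restoring the default resolution
//
// A sleep of 500 ms (a multiple of 10) leaves the system timer resolution untouched.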
3543
3544int os::sleep(Thread* thread, jlong ms, bool interruptable) {
3545  jlong limit = (jlong) MAXDWORD;
3546
3547  while (ms > limit) {
3548    int res;
3549    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
3550      return res;
3551    }
3552    ms -= limit;
3553  }
3554
3555  assert(thread == Thread::current(), "thread consistency check");
3556  OSThread* osthread = thread->osthread();
3557  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
3558  int result;
3559  if (interruptable) {
3560    assert(thread->is_Java_thread(), "must be java thread");
3561    JavaThread *jt = (JavaThread *) thread;
3562    ThreadBlockInVM tbivm(jt);
3563
3564    jt->set_suspend_equivalent();
3565    // cleared by handle_special_suspend_equivalent_condition() or
3566    // java_suspend_self() via check_and_wait_while_suspended()
3567
3568    HANDLE events[1];
3569    events[0] = osthread->interrupt_event();
3570    HighResolutionInterval *phri=NULL;
3571    if (!ForceTimeHighResolution) {
3572      phri = new HighResolutionInterval(ms);
3573    }
3574    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
3575      result = OS_TIMEOUT;
3576    } else {
3577      ResetEvent(osthread->interrupt_event());
3578      osthread->set_interrupted(false);
3579      result = OS_INTRPT;
3580    }
3581    delete phri; //if it is NULL, harmless
3582
3583    // were we externally suspended while we were waiting?
3584    jt->check_and_wait_while_suspended();
3585  } else {
3586    assert(!thread->is_Java_thread(), "must not be java thread");
3587    Sleep((long) ms);
3588    result = OS_TIMEOUT;
3589  }
3590  return result;
3591}
3592
3593// Short sleep, direct OS call.
3594//
3595// ms = 0, means allow others (if any) to run.
3596//
3597void os::naked_short_sleep(jlong ms) {
3598  assert(ms < 1000, "Un-interruptible sleep, short time use only");
3599  Sleep(ms);
3600}
3601
3602// Sleep forever; naked call to OS-specific sleep; use with CAUTION
3603void os::infinite_sleep() {
3604  while (true) {    // sleep forever ...
3605    Sleep(100000);  // ... 100 seconds at a time
3606  }
3607}
3608
3609typedef BOOL (WINAPI * STTSignature)(void);
3610
3611void os::naked_yield() {
3612  // Consider passing back the return value from SwitchToThread().
3613  SwitchToThread();
3614}
3615
3616// Win32 only gives you access to seven real priorities at a time,
3617// so we compress Java's ten down to seven.  It would be better
3618// if we dynamically adjusted relative priorities.
3619
3620int os::java_to_os_priority[CriticalPriority + 1] = {
3621  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3622  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3623  THREAD_PRIORITY_LOWEST,                       // 2
3624  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3625  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3626  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3627  THREAD_PRIORITY_NORMAL,                       // 6
3628  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3629  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3630  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3631  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3632  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3633};
3634
3635int prio_policy1[CriticalPriority + 1] = {
3636  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3637  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3638  THREAD_PRIORITY_LOWEST,                       // 2
3639  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3640  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3641  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3642  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3643  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3644  THREAD_PRIORITY_HIGHEST,                      // 8
3645  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3646  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3647  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3648};
3649
3650static int prio_init() {
3651  // If ThreadPriorityPolicy is 1, switch tables
3652  if (ThreadPriorityPolicy == 1) {
3653    int i;
3654    for (i = 0; i < CriticalPriority + 1; i++) {
3655      os::java_to_os_priority[i] = prio_policy1[i];
3656    }
3657  }
3658  if (UseCriticalJavaThreadPriority) {
3659    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3660  }
3661  return 0;
3662}
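
// Editorial example: with the default policy a Java priority of 10 (MaxPriority) maps
// to THREAD_PRIORITY_HIGHEST; with -XX:ThreadPriorityPolicy=1 the table is swapped and
// the same priority maps to THREAD_PRIORITY_TIME_CRITICAL. With
// -XX:+UseCriticalJavaThreadPriority, MaxPriority is additionally promoted to whatever
// CriticalPriority maps to.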
3663
3664OSReturn os::set_native_priority(Thread* thread, int priority) {
3665  if (!UseThreadPriorities) return OS_OK;
3666  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3667  return ret ? OS_OK : OS_ERR;
3668}
3669
3670OSReturn os::get_native_priority(const Thread* const thread,
3671                                 int* priority_ptr) {
3672  if (!UseThreadPriorities) {
3673    *priority_ptr = java_to_os_priority[NormPriority];
3674    return OS_OK;
3675  }
3676  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3677  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3678    assert(false, "GetThreadPriority failed");
3679    return OS_ERR;
3680  }
3681  *priority_ptr = os_prio;
3682  return OS_OK;
3683}
3684
3685
3686// Hint to the underlying OS that a task switch would not be good.
3687// Void return because it's a hint and can fail.
3688void os::hint_no_preempt() {}
3689
3690void os::interrupt(Thread* thread) {
3691  assert(!thread->is_Java_thread() || Thread::current() == thread ||
3692         Threads_lock->owned_by_self(),
3693         "possibility of dangling Thread pointer");
3694
3695  OSThread* osthread = thread->osthread();
3696  osthread->set_interrupted(true);
3697  // More than one thread can get here with the same value of osthread,
3698  // resulting in multiple notifications.  We do, however, want the store
3699  // to interrupted() to be visible to other threads before we post
3700  // the interrupt event.
3701  OrderAccess::release();
3702  SetEvent(osthread->interrupt_event());
3703  // For JSR166:  unpark after setting status
3704  if (thread->is_Java_thread()) {
3705    ((JavaThread*)thread)->parker()->unpark();
3706  }
3707
3708  ParkEvent * ev = thread->_ParkEvent;
3709  if (ev != NULL) ev->unpark();
3710}
3711
3712
3713bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3714  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3715         "possibility of dangling Thread pointer");
3716
3717  OSThread* osthread = thread->osthread();
3718  // There is no synchronization between the setting of the interrupt
3719  // and it being cleared here. It is critical - see 6535709 - that
3720  // we only clear the interrupt state, and reset the interrupt event,
3721  // if we are going to report that we were indeed interrupted - else
3722  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3723  // depending on the timing. We check the thread's interrupt event to see
3724  // whether the thread received a real interrupt, which prevents spurious wakeups.
3725  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3726  if (interrupted && clear_interrupted) {
3727    osthread->set_interrupted(false);
3728    ResetEvent(osthread->interrupt_event());
3729  } // Otherwise leave the interrupted state alone
3730
3731  return interrupted;
3732}
3733
3734// Gets a pc (hint) for a running thread. Currently used only for profiling.
3735ExtendedPC os::get_thread_pc(Thread* thread) {
3736  CONTEXT context;
3737  context.ContextFlags = CONTEXT_CONTROL;
3738  HANDLE handle = thread->osthread()->thread_handle();
3739#ifdef _M_IA64
3740  assert(0, "Fix get_thread_pc");
3741  return ExtendedPC(NULL);
3742#else
3743  if (GetThreadContext(handle, &context)) {
3744#ifdef _M_AMD64
3745    return ExtendedPC((address) context.Rip);
3746#else
3747    return ExtendedPC((address) context.Eip);
3748#endif
3749  } else {
3750    return ExtendedPC(NULL);
3751  }
3752#endif
3753}
3754
3755// GetCurrentThreadId() returns DWORD
3756intx os::current_thread_id()  { return GetCurrentThreadId(); }
3757
3758static int _initial_pid = 0;
3759
3760int os::current_process_id() {
3761  return (_initial_pid ? _initial_pid : _getpid());
3762}
3763
3764int    os::win32::_vm_page_size              = 0;
3765int    os::win32::_vm_allocation_granularity = 0;
3766int    os::win32::_processor_type            = 0;
3767// Processor level is not available on non-NT systems, use vm_version instead
3768int    os::win32::_processor_level           = 0;
3769julong os::win32::_physical_memory           = 0;
3770size_t os::win32::_default_stack_size        = 0;
3771
3772intx          os::win32::_os_thread_limit    = 0;
3773volatile intx os::win32::_os_thread_count    = 0;
3774
3775bool   os::win32::_is_windows_server         = false;
3776
3777// 6573254
3778// Currently, the bug is observed across all the supported Windows releases,
3779// including the latest one (as of this writing - Windows Server 2012 R2)
3780bool   os::win32::_has_exit_bug              = true;
3781
3782void os::win32::initialize_system_info() {
3783  SYSTEM_INFO si;
3784  GetSystemInfo(&si);
3785  _vm_page_size    = si.dwPageSize;
3786  _vm_allocation_granularity = si.dwAllocationGranularity;
3787  _processor_type  = si.dwProcessorType;
3788  _processor_level = si.wProcessorLevel;
3789  set_processor_count(si.dwNumberOfProcessors);
3790
3791  MEMORYSTATUSEX ms;
3792  ms.dwLength = sizeof(ms);
3793
3794  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3795  // dwMemoryLoad (% of memory in use)
3796  GlobalMemoryStatusEx(&ms);
3797  _physical_memory = ms.ullTotalPhys;
3798
3799  OSVERSIONINFOEX oi;
3800  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3801  GetVersionEx((OSVERSIONINFO*)&oi);
3802  switch (oi.dwPlatformId) {
3803  case VER_PLATFORM_WIN32_NT:
3804    {
3805      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3806      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3807          oi.wProductType == VER_NT_SERVER) {
3808        _is_windows_server = true;
3809      }
3810    }
3811    break;
3812  default: fatal("Unknown platform");
3813  }
3814
3815  _default_stack_size = os::current_stack_size();
3816  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3817  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3818         "stack size not a multiple of page size");
3819
3820  initialize_performance_counter();
3821}
3822
3823
3824HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3825                                      int ebuflen) {
3826  char path[MAX_PATH];
3827  DWORD size;
3828  DWORD pathLen = (DWORD)sizeof(path);
3829  HINSTANCE result = NULL;
3830
3831  // only allow library name without path component
3832  assert(strchr(name, '\\') == NULL, "path not allowed");
3833  assert(strchr(name, ':') == NULL, "path not allowed");
3834  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3835    jio_snprintf(ebuf, ebuflen,
3836                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3837    return NULL;
3838  }
3839
3840  // search system directory
3841  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3842    if (size >= pathLen) {
3843      return NULL; // truncated
3844    }
3845    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3846      return NULL; // truncated
3847    }
3848    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3849      return result;
3850    }
3851  }
3852
3853  // try Windows directory
3854  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3855    if (size >= pathLen) {
3856      return NULL; // truncated
3857    }
3858    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3859      return NULL; // truncated
3860    }
3861    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3862      return result;
3863    }
3864  }
3865
3866  jio_snprintf(ebuf, ebuflen,
3867               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3868  return NULL;
3869}
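
// Illustrative caller of load_Windows_dll (editorial sketch; "psapi.dll" is just an
// example name):
//
//   char ebuf[1024];
//   HINSTANCE h = os::win32::load_Windows_dll("psapi.dll", ebuf, (int) sizeof(ebuf));
//   if (h == NULL) {
//     warning("could not load library: %s", ebuf);
//   }
//
// Only bare file names are accepted; a name containing '\\' or ':' is rejected before
// any directory is searched.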
3870
3871#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3872#define EXIT_TIMEOUT 300000 /* 5 minutes */
3873
3874static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3875  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3876  return TRUE;
3877}
3878
3879int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3880  // Basic approach:
3881  //  - Each exiting thread registers its intent to exit and then does so.
3882  //  - A thread trying to terminate the process must wait for all
3883  //    threads currently exiting to complete their exit.
3884
3885  if (os::win32::has_exit_bug()) {
3886    // The array holds handles of the threads that have started exiting by calling
3887    // _endthreadex().
3888    // Should be large enough to avoid blocking the exiting thread due to lack of
3889    // a free slot.
3890    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3891    static int handle_count = 0;
3892
3893    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3894    static CRITICAL_SECTION crit_sect;
3895    static volatile jint process_exiting = 0;
3896    int i, j;
3897    DWORD res;
3898    HANDLE hproc, hthr;
3899
3900    // We only attempt to register threads until a process exiting
3901    // thread manages to set the process_exiting flag. Any threads
3902    // that come through here after the process_exiting flag is set
3903    // are unregistered and will be caught in the SuspendThread()
3904    // infinite loop below.
3905    bool registered = false;
3906
3907    // The first thread that reached this point, initializes the critical section.
3908    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3909      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3910    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3911      if (what != EPT_THREAD) {
3912        // Atomically set process_exiting before the critical section
3913        // to increase the visibility between racing threads.
3914        Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
3915      }
3916      EnterCriticalSection(&crit_sect);
3917
3918      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3919        // Remove from the array those handles of the threads that have completed exiting.
3920        for (i = 0, j = 0; i < handle_count; ++i) {
3921          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3922          if (res == WAIT_TIMEOUT) {
3923            handles[j++] = handles[i];
3924          } else {
3925            if (res == WAIT_FAILED) {
3926              warning("WaitForSingleObject failed (%u) in %s: %d\n",
3927                      GetLastError(), __FILE__, __LINE__);
3928            }
3929            // Don't keep the handle, if we failed waiting for it.
3930            CloseHandle(handles[i]);
3931          }
3932        }
3933
3934        // If there's no free slot in the array of the kept handles, we'll have to
3935        // wait until at least one thread completes exiting.
3936        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3937          // Raise the priority of the oldest exiting thread to increase its chances
3938          // to complete sooner.
3939          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3940          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3941          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3942            i = (res - WAIT_OBJECT_0);
3943            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3944            for (; i < handle_count; ++i) {
3945              handles[i] = handles[i + 1];
3946            }
3947          } else {
3948            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3949                    (res == WAIT_FAILED ? "failed" : "timed out"),
3950                    GetLastError(), __FILE__, __LINE__);
3951            // Don't keep handles, if we failed waiting for them.
3952            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3953              CloseHandle(handles[i]);
3954            }
3955            handle_count = 0;
3956          }
3957        }
3958
3959        // Store a duplicate of the current thread handle in the array of handles.
3960        hproc = GetCurrentProcess();
3961        hthr = GetCurrentThread();
3962        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3963                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
3964          warning("DuplicateHandle failed (%u) in %s: %d\n",
3965                  GetLastError(), __FILE__, __LINE__);
3966
3967          // We can't register this thread (no more handles) so this thread
3968          // may be racing with a thread that is calling exit(). If the thread
3969          // that is calling exit() has managed to set the process_exiting
3970          // flag, then this thread will be caught in the SuspendThread()
3971          // infinite loop below which closes that race. A small timing
3972          // window remains before the process_exiting flag is set, but it
3973          // is only exposed when we are out of handles.
3974        } else {
3975          ++handle_count;
3976          registered = true;
3977
3978          // The current exiting thread has stored its handle in the array, and now
3979          // should leave the critical section before calling _endthreadex().
3980        }
3981
3982      } else if (what != EPT_THREAD && handle_count > 0) {
3983        jlong start_time, finish_time, timeout_left;
3984        // Before ending the process, make sure all the threads that had called
3985        // _endthreadex() completed.
3986
3987        // Set the priority level of the current thread to the same value as
3988        // the priority level of exiting threads.
3989        // This is to ensure it will be given a fair chance to execute if
3990        // the timeout expires.
3991        hthr = GetCurrentThread();
3992        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3993        start_time = os::javaTimeNanos();
3994        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3995        for (i = 0; ; ) {
3996          int portion_count = handle_count - i;
3997          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3998            portion_count = MAXIMUM_WAIT_OBJECTS;
3999          }
4000          for (j = 0; j < portion_count; ++j) {
4001            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4002          }
4003          timeout_left = (finish_time - start_time) / 1000000L;
4004          if (timeout_left < 0) {
4005            timeout_left = 0;
4006          }
4007          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4008          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4009            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4010                    (res == WAIT_FAILED ? "failed" : "timed out"),
4011                    GetLastError(), __FILE__, __LINE__);
4012            // Reset portion_count so we close the remaining
4013            // handles due to this error.
4014            portion_count = handle_count - i;
4015          }
4016          for (j = 0; j < portion_count; ++j) {
4017            CloseHandle(handles[i + j]);
4018          }
4019          if ((i += portion_count) >= handle_count) {
4020            break;
4021          }
4022          start_time = os::javaTimeNanos();
4023        }
4024        handle_count = 0;
4025      }
4026
4027      LeaveCriticalSection(&crit_sect);
4028    }
4029
4030    if (!registered &&
4031        OrderAccess::load_acquire(&process_exiting) != 0 &&
4032        process_exiting != (jint)GetCurrentThreadId()) {
4033      // Some other thread is about to call exit(), so we don't let
4034      // the current unregistered thread proceed to exit() or _endthreadex()
4035      while (true) {
4036        SuspendThread(GetCurrentThread());
4037        // Avoid busy-wait loop, if SuspendThread() failed.
4038        Sleep(EXIT_TIMEOUT);
4039      }
4040    }
4041  }
4042
4043  // We are here if either
4044  // - there's no 'race at exit' bug on this OS release;
4045  // - initialization of the critical section failed (unlikely);
4046  // - the current thread has registered itself and left the critical section;
4047  // - the process-exiting thread has raised the flag and left the critical section.
4048  if (what == EPT_THREAD) {
4049    _endthreadex((unsigned)exit_code);
4050  } else if (what == EPT_PROCESS) {
4051    ::exit(exit_code);
4052  } else {
4053    _exit(exit_code);
4054  }
4055
4056  // Should not reach here
4057  return exit_code;
4058}
4059
4060#undef EXIT_TIMEOUT
4061
4062void os::win32::setmode_streams() {
4063  _setmode(_fileno(stdin), _O_BINARY);
4064  _setmode(_fileno(stdout), _O_BINARY);
4065  _setmode(_fileno(stderr), _O_BINARY);
4066}
4067
4068
4069bool os::is_debugger_attached() {
4070  return IsDebuggerPresent() ? true : false;
4071}
4072
4073
4074void os::wait_for_keypress_at_exit(void) {
4075  if (PauseAtExit) {
4076    fprintf(stderr, "Press any key to continue...\n");
4077    fgetc(stdin);
4078  }
4079}
4080
4081
4082bool os::message_box(const char* title, const char* message) {
4083  int result = MessageBox(NULL, message, title,
4084                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4085  return result == IDYES;
4086}
4087
4088#ifndef PRODUCT
4089#ifndef _WIN64
4090// Helpers to check whether NX protection is enabled
4091int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4092  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4093      pex->ExceptionRecord->NumberParameters > 0 &&
4094      pex->ExceptionRecord->ExceptionInformation[0] ==
4095      EXCEPTION_INFO_EXEC_VIOLATION) {
4096    return EXCEPTION_EXECUTE_HANDLER;
4097  }
4098  return EXCEPTION_CONTINUE_SEARCH;
4099}
4100
4101void nx_check_protection() {
4102  // If NX is enabled we'll get an exception calling into code on the stack
4103  char code[] = { (char)0xC3 }; // ret
4104  void *code_ptr = (void *)code;
4105  __try {
4106    __asm call code_ptr
4107  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4108    tty->print_raw_cr("NX protection detected.");
4109  }
4110}
4111#endif // _WIN64
4112#endif // PRODUCT
4113
4114// This is called _before_ the global arguments have been parsed
4115void os::init(void) {
4116  _initial_pid = _getpid();
4117
4118  init_random(1234567);
4119
4120  win32::initialize_system_info();
4121  win32::setmode_streams();
4122  init_page_sizes((size_t) win32::vm_page_size());
4123
4124  // This may be overridden later when argument processing is done.
4125  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);
4126
4127  // Initialize main_process and main_thread
4128  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4129  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4130                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4131    fatal("DuplicateHandle failed\n");
4132  }
4133  main_thread_id = (int) GetCurrentThreadId();
4134
4135  // initialize fast thread access - only used for 32-bit
4136  win32::initialize_thread_ptr_offset();
4137}
4138
4139// To install functions for atexit processing
4140extern "C" {
4141  static void perfMemory_exit_helper() {
4142    perfMemory_exit();
4143  }
4144}
4145
4146static jint initSock();
4147
4148// this is called _after_ the global arguments have been parsed
4149jint os::init_2(void) {
4150  // Allocate a single page and mark it as readable for safepoint polling
4151  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
4152  guarantee(polling_page != NULL, "Reserve Failed for polling page");
4153
4154  address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
4155  guarantee(return_page != NULL, "Commit Failed for polling page");
4156
4157  os::set_polling_page(polling_page);
4158  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));
4159
4160  if (!UseMembar) {
4161    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
4162    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");
4163
4164    return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
4165    guarantee(return_page != NULL, "Commit Failed for memory serialize page");
4166
4167    os::set_memory_serialize_page(mem_serialize_page);
4168    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
4169  }
4170
4171  // Setup Windows Exceptions
4172
4173  // for debugging float code generation bugs
4174  if (ForceFloatExceptions) {
4175#ifndef  _WIN64
4176    static long fp_control_word = 0;
4177    __asm { fstcw fp_control_word }
4178    // see Intel PPro Manual, Vol. 2, p 7-16
4179    const long precision = 0x20;
4180    const long underflow = 0x10;
4181    const long overflow  = 0x08;
4182    const long zero_div  = 0x04;
4183    const long denorm    = 0x02;
4184    const long invalid   = 0x01;
4185    fp_control_word |= invalid;
4186    __asm { fldcw fp_control_word }
4187#endif
4188  }
4189
4190  // If stack_commit_size is 0, windows will reserve the default size,
4191  // but only commit a small portion of it.
4192  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
4193  size_t default_reserve_size = os::win32::default_stack_size();
4194  size_t actual_reserve_size = stack_commit_size;
4195  if (stack_commit_size < default_reserve_size) {
4196    // If stack_commit_size == 0, we want this too
4197    actual_reserve_size = default_reserve_size;
4198  }
4199
4200  // Check minimum allowable stack size for thread creation and to initialize
4201  // the java system classes, including StackOverflowError - depends on page
4202  // size.  Add two 4K pages for compiler2 recursion in main thread.
4203  // Add in 4*BytesPerWord 4K pages to account for VM stack during
4204  // class initialization depending on 32 or 64 bit VM.
4205  size_t min_stack_allowed =
4206            (size_t)(JavaThread::stack_guard_zone_size() +
4207                     JavaThread::stack_shadow_zone_size() +
4208                     (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4209
4210  min_stack_allowed = align_size_up(min_stack_allowed, os::vm_page_size());
4211
4212  if (actual_reserve_size < min_stack_allowed) {
4213    tty->print_cr("\nThe stack size specified is too small, "
4214                  "Specify at least %dk",
4215                  min_stack_allowed / K);
4216    return JNI_ERR;
4217  }
4218
4219  JavaThread::set_stack_size_at_create(stack_commit_size);
4220
4221  // Calculate the theoretical max. number of threads to guard against artificial
4222  // out-of-memory situations, where all available address-space has been
4223  // reserved by thread stacks.
4224  assert(actual_reserve_size != 0, "Must have a stack");
4225
4226  // Calculate the thread limit when we should start doing Virtual Memory
4227  // banging. Currently this is when the threads have used all but 200Mb of address space.
4228  //
4229  // TODO: consider performing a similar calculation for commit size instead
4230  // as reserve size, since on a 64-bit platform we'll run into that more
4231  // often than running out of virtual memory space.  We can use the
4232  // lower value of the two calculations as the os_thread_limit.
4233  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4234  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4235
4236  // at exit methods are called in the reverse order of their registration.
4237  // there is no limit to the number of functions registered. atexit does
4238  // not set errno.
4239
4240  if (PerfAllowAtExitRegistration) {
4241    // only register atexit functions if PerfAllowAtExitRegistration is set.
4242    // atexit functions can be delayed until process exit time, which
4243    // can be problematic for embedded VM situations. Embedded VMs should
4244    // call DestroyJavaVM() to assure that VM resources are released.
4245
4246    // note: perfMemory_exit_helper atexit function may be removed in
4247    // the future if the appropriate cleanup code can be added to the
4248    // VM_Exit VMOperation's doit method.
4249    if (atexit(perfMemory_exit_helper) != 0) {
4250      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4251    }
4252  }
4253
4254#ifndef _WIN64
4255  // Print something if NX is enabled (win32 on AMD64)
4256  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4257#endif
4258
4259  // initialize thread priority policy
4260  prio_init();
4261
4262  if (UseNUMA && !ForceNUMA) {
4263    UseNUMA = false; // We don't fully support this yet
4264  }
4265
4266  if (UseNUMAInterleaving) {
4267    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4268    bool success = numa_interleaving_init();
4269    if (!success) UseNUMAInterleaving = false;
4270  }
4271
4272  if (initSock() != JNI_OK) {
4273    return JNI_ERR;
4274  }
4275
4276  return JNI_OK;
4277}
4278
4279// Mark the polling page as unreadable
4280void os::make_polling_page_unreadable(void) {
4281  DWORD old_status;
4282  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4283                      PAGE_NOACCESS, &old_status)) {
4284    fatal("Could not disable polling page");
4285  }
4286}
4287
4288// Mark the polling page as readable
4289void os::make_polling_page_readable(void) {
4290  DWORD old_status;
4291  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4292                      PAGE_READONLY, &old_status)) {
4293    fatal("Could not enable polling page");
4294  }
4295}
4296
4297
4298int os::stat(const char *path, struct stat *sbuf) {
4299  char pathbuf[MAX_PATH];
4300  if (strlen(path) > MAX_PATH - 1) {
4301    errno = ENAMETOOLONG;
4302    return -1;
4303  }
4304  os::native_path(strcpy(pathbuf, path));
4305  int ret = ::stat(pathbuf, sbuf);
4306  if (sbuf != NULL && UseUTCFileTimestamp) {
4307    // Fix for 6539723.  st_mtime returned from stat() is dependent on
4308    // the system timezone and so can return different values for the
4309    // same file if/when daylight savings time changes.  This adjustment
4310    // makes sure the same timestamp is returned regardless of the TZ.
4311    //
4312    // See:
4313    // http://msdn.microsoft.com/library/
4314    //   default.asp?url=/library/en-us/sysinfo/base/
4315    //   time_zone_information_str.asp
4316    // and
4317    // http://msdn.microsoft.com/library/default.asp?url=
4318    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
4319    //
4320    // NOTE: there is an insidious bug here:  If the timezone is changed
4321    // after the call to stat() but before 'GetTimeZoneInformation()', then
4322    // the adjustment we do here will be wrong and we'll return the wrong
4323    // value (which will likely end up creating an invalid class data
4324    // archive).  Absent a better API for this, or some time zone locking
4325    // mechanism, we'll have to live with this risk.
4326    TIME_ZONE_INFORMATION tz;
4327    DWORD tzid = GetTimeZoneInformation(&tz);
4328    int daylightBias =
4329      (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
4330    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
4331  }
4332  return ret;
4333}
4334
4335
4336#define FT2INT64(ft) \
4337  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4338
4339
4340// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4341// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4342// of a thread.
4343//
4344// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4345// the fast estimate available on the platform.
4346
4347// current_thread_cpu_time() is not optimized for Windows yet
4348jlong os::current_thread_cpu_time() {
4349  // return user + sys since the cost is the same
4350  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4351}
4352
4353jlong os::thread_cpu_time(Thread* thread) {
4354  // consistent with what current_thread_cpu_time() returns.
4355  return os::thread_cpu_time(thread, true /* user+sys */);
4356}
4357
4358jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4359  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4360}
4361
4362jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4363  // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4364  // If this function changes, os::is_thread_cpu_time_supported() should too
4365  FILETIME CreationTime;
4366  FILETIME ExitTime;
4367  FILETIME KernelTime;
4368  FILETIME UserTime;
4369
4370  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4371                      &ExitTime, &KernelTime, &UserTime) == 0) {
4372    return -1;
4373  } else if (user_sys_cpu_time) {
4374    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4375  } else {
4376    return FT2INT64(UserTime) * 100;
4377  }
4378}
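
// Editorial note on the conversion above: FILETIME counts 100-nanosecond intervals, so
// multiplying by 100 yields nanoseconds. For example, one second of user time arrives
// as 10,000,000 FILETIME units and is reported as 1,000,000,000 ns.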
4379
4380void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4381  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4382  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4383  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4384  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4385}
4386
4387void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4388  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4389  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4390  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4391  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4392}
4393
4394bool os::is_thread_cpu_time_supported() {
4395  // see os::thread_cpu_time
4396  FILETIME CreationTime;
4397  FILETIME ExitTime;
4398  FILETIME KernelTime;
4399  FILETIME UserTime;
4400
4401  if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4402                      &KernelTime, &UserTime) == 0) {
4403    return false;
4404  } else {
4405    return true;
4406  }
4407}
4408
4409// Windows doesn't provide a loadavg primitive, so this is stubbed out for now.
4410// It does have primitives (the PDH API) to get CPU usage and run queue length:
4411// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4412// If we wanted to implement loadavg on Windows, we have a few options:
4413//
4414// a) Query CPU usage and run queue length and "fake" an answer by
4415//    returning the CPU usage if it's under 100%, and the run queue
4416//    length otherwise.  It turns out that querying is pretty slow
4417//    on Windows, on the order of 200 microseconds on a fast machine.
4418//    Note that on Windows the CPU usage value is the % usage
4419//    since the last time the API was called (and the first call
4420//    returns 100%), so we'd have to deal with that as well.
4421//
4422// b) Sample the "fake" answer using a sampling thread and store
4423//    the answer in a global variable.  The call to loadavg would
4424//    just return the value of the global, avoiding the slow query.
4425//
4426// c) Sample a better answer using exponential decay to smooth the
4427//    value.  This is basically the algorithm used by UNIX kernels.
4428//
4429// Note that sampling thread starvation could affect both (b) and (c).
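//
// For reference, a minimal sketch of option (a) using the documented PDH calls
// (PdhOpenQuery, PdhAddEnglishCounter, PdhCollectQueryData,
// PdhGetFormattedCounterValue) might look like the following.  This is only an
// illustration -- it is not compiled into the VM, error handling is omitted,
// and the "% Processor Time" counter needs two collections before it yields a
// rate:
//
//   #include <pdh.h>                      // link with pdh.lib
//
//   static bool sample_cpu_usage(double* cpu_percent) {
//     PDH_HQUERY query;
//     PDH_HCOUNTER counter;
//     if (PdhOpenQuery(NULL, 0, &query) != ERROR_SUCCESS) return false;
//     PdhAddEnglishCounter(query, "\\Processor(_Total)\\% Processor Time",
//                          0, &counter);
//     PdhCollectQueryData(query);         // first sample primes the counter
//     Sleep(100);
//     PdhCollectQueryData(query);         // second sample makes the rate valid
//     PDH_FMT_COUNTERVALUE value;
//     PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &value);
//     PdhCloseQuery(query);
//     *cpu_percent = value.doubleValue;
//     return true;
//   }
//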
4430int os::loadavg(double loadavg[], int nelem) {
4431  return -1;
4432}
4433
4434
4435// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4436bool os::dont_yield() {
4437  return DontYieldALot;
4438}
4439
4440// This method is a slightly reworked copy of JDK's sysOpen
4441// from src/windows/hpi/src/sys_api_md.c
4442
4443int os::open(const char *path, int oflag, int mode) {
4444  char pathbuf[MAX_PATH];
4445
4446  if (strlen(path) > MAX_PATH - 1) {
4447    errno = ENAMETOOLONG;
4448    return -1;
4449  }
4450  os::native_path(strcpy(pathbuf, path));
4451  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4452}
4453
4454FILE* os::open(int fd, const char* mode) {
4455  return ::_fdopen(fd, mode);
4456}
4457
4458// Is a (classpath) directory empty?
4459bool os::dir_is_empty(const char* path) {
4460  WIN32_FIND_DATA fd;
4461  HANDLE f = FindFirstFile(path, &fd);
4462  if (f == INVALID_HANDLE_VALUE) {
4463    return true;
4464  }
4465  FindClose(f);
4466  return false;
4467}
4468
4469// create binary file, rewriting existing file if required
4470int os::create_binary_file(const char* path, bool rewrite_existing) {
4471  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4472  if (!rewrite_existing) {
4473    oflags |= _O_EXCL;
4474  }
4475  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4476}
4477
4478// return current position of file pointer
4479jlong os::current_file_offset(int fd) {
4480  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4481}
4482
4483// move file pointer to the specified offset
4484jlong os::seek_to_file_offset(int fd, jlong offset) {
4485  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4486}
4487
4488
4489jlong os::lseek(int fd, jlong offset, int whence) {
4490  return (jlong) ::_lseeki64(fd, offset, whence);
4491}
4492
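// Read nBytes from the file at the given absolute offset.  The offset is
// passed to ReadFile via an OVERLAPPED structure, so the read does not depend
// on the current file pointer.  Returns the number of bytes read, or 0 if the
// read fails.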
4493size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4494  OVERLAPPED ov;
4495  DWORD nread;
4496  BOOL result;
4497
4498  ZeroMemory(&ov, sizeof(ov));
4499  ov.Offset = (DWORD)offset;
4500  ov.OffsetHigh = (DWORD)(offset >> 32);
4501
4502  HANDLE h = (HANDLE)::_get_osfhandle(fd);
4503
4504  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4505
4506  return result ? nread : 0;
4507}
4508
4509
4510// This method is a slightly reworked copy of JDK's sysNativePath
4511// from src/windows/hpi/src/path_md.c
4512
4513// Convert a pathname to native format.  On win32, this involves forcing all
4514// separators to be '\\' rather than '/' (both are legal inputs, but Win95
4515// sometimes rejects '/') and removing redundant separators.  The input path is
4516// assumed to have been converted into the character encoding used by the local
4517// system.  Because this might be a double-byte encoding, care is taken to
4518// treat double-byte lead characters correctly.
4519//
4520// This procedure modifies the given path in place, as the result is never
4521// longer than the original.  There is no error return; this operation always
4522// succeeds.
4523char * os::native_path(char *path) {
4524  char *src = path, *dst = path, *end = path;
4525  char *colon = NULL;  // If a drive specifier is found, this will
4526                       // point to the colon following the drive letter
4527
4528  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4529  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4530          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4531
4532  // Check for leading separators
4533#define isfilesep(c) ((c) == '/' || (c) == '\\')
4534  while (isfilesep(*src)) {
4535    src++;
4536  }
4537
4538  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4539    // Remove leading separators if followed by drive specifier.  This
4540    // hack is necessary to support file URLs containing drive
4541    // specifiers (e.g., "file://c:/path").  As a side effect,
4542    // "/c:/path" can be used as an alternative to "c:/path".
4543    *dst++ = *src++;
4544    colon = dst;
4545    *dst++ = ':';
4546    src++;
4547  } else {
4548    src = path;
4549    if (isfilesep(src[0]) && isfilesep(src[1])) {
4550      // UNC pathname: Retain first separator; leave src pointed at
4551      // second separator so that further separators will be collapsed
4552      // into the second separator.  The result will be a pathname
4553      // beginning with "\\\\" followed (most likely) by a host name.
4554      src = dst = path + 1;
4555      path[0] = '\\';     // Force first separator to '\\'
4556    }
4557  }
4558
4559  end = dst;
4560
4561  // Remove redundant separators from remainder of path, forcing all
4562  // separators to be '\\' rather than '/'. Also, single byte space
4563  // characters are removed from the end of the path because those
4564  // are not legal ending characters on this operating system.
4565  //
4566  while (*src != '\0') {
4567    if (isfilesep(*src)) {
4568      *dst++ = '\\'; src++;
4569      while (isfilesep(*src)) src++;
4570      if (*src == '\0') {
4571        // Check for trailing separator
4572        end = dst;
4573        if (colon == dst - 2) break;  // "z:\\"
4574        if (dst == path + 1) break;   // "\\"
4575        if (dst == path + 2 && isfilesep(path[0])) {
4576          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4577          // beginning of a UNC pathname.  Even though it is not, by
4578          // itself, a valid UNC pathname, we leave it as is in order
4579          // to be consistent with the path canonicalizer as well
4580          // as the win32 APIs, which treat this case as an invalid
4581          // UNC pathname rather than as an alias for the root
4582          // directory of the current drive.
4583          break;
4584        }
4585        end = --dst;  // Path does not denote a root directory, so
4586                      // remove trailing separator
4587        break;
4588      }
4589      end = dst;
4590    } else {
4591      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4592        *dst++ = *src++;
4593        if (*src) *dst++ = *src++;
4594        end = dst;
4595      } else {  // Copy a single-byte character
4596        char c = *src++;
4597        *dst++ = c;
4598        // Space is not a legal ending character
4599        if (c != ' ') end = dst;
4600      }
4601    }
4602  }
4603
4604  *end = '\0';
4605
4606  // For "z:", add "." to work around a bug in the C runtime library
4607  if (colon == dst - 1) {
4608    path[2] = '.';
4609    path[3] = '\0';
4610  }
4611
4612  return path;
4613}
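
// Illustrative transformations (assuming a single-byte locale):
//   "/c:/path"         -> "c:\path"
//   "c:/a//b/"         -> "c:\a\b"
//   "//server/share/"  -> "\\server\share"
//   "z:"               -> "z:."   (the CRT workaround above)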
4614
4615// This code is a copy of JDK's sysSetLength
4616// from src/windows/hpi/src/sys_api_md.c
4617
4618int os::ftruncate(int fd, jlong length) {
4619  HANDLE h = (HANDLE)::_get_osfhandle(fd);
4620  long high = (long)(length >> 32);
4621  DWORD ret;
4622
4623  if (h == (HANDLE)(-1)) {
4624    return -1;
4625  }
4626
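  // SetFilePointer takes the 64-bit length split into a low 32-bit half
  // (passed by value) and a high 32-bit half (passed by address); moving the
  // pointer and then calling SetEndOfFile truncates or extends the file.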
4627  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4628  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4629    return -1;
4630  }
4631
4632  if (::SetEndOfFile(h) == FALSE) {
4633    return -1;
4634  }
4635
4636  return 0;
4637}
4638
4639int os::get_fileno(FILE* fp) {
4640  return _fileno(fp);
4641}
4642
4643// This code is a copy of JDK's sysSync
4644// from src/windows/hpi/src/sys_api_md.c
4645// except for the legacy workaround for a bug in Win 98
4646
4647int os::fsync(int fd) {
4648  HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4649
4650  if ((!::FlushFileBuffers(handle)) &&
4651      (GetLastError() != ERROR_ACCESS_DENIED)) {
4652    // from winerror.h
4653    return -1;
4654  }
4655  return 0;
4656}
4657
4658static int nonSeekAvailable(int, long *);
4659static int stdinAvailable(int, long *);
4660
4661#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
4662#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)
4663
4664// This code is a copy of JDK's sysAvailable
4665// from src/windows/hpi/src/sys_api_md.c
4666
4667int os::available(int fd, jlong *bytes) {
4668  jlong cur, end;
4669  struct _stati64 stbuf64;
4670
4671  if (::_fstati64(fd, &stbuf64) >= 0) {
4672    int mode = stbuf64.st_mode;
4673    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4674      int ret;
4675      long lpbytes;
4676      if (fd == 0) {
4677        ret = stdinAvailable(fd, &lpbytes);
4678      } else {
4679        ret = nonSeekAvailable(fd, &lpbytes);
4680      }
4681      (*bytes) = (jlong)(lpbytes);
4682      return ret;
4683    }
4684    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4685      return FALSE;
4686    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4687      return FALSE;
4688    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4689      return FALSE;
4690    }
4691    *bytes = end - cur;
4692    return TRUE;
4693  } else {
4694    return FALSE;
4695  }
4696}
4697
4698void os::flockfile(FILE* fp) {
4699  _lock_file(fp);
4700}
4701
4702void os::funlockfile(FILE* fp) {
4703  _unlock_file(fp);
4704}
4705
4706// This code is a copy of JDK's nonSeekAvailable
4707// from src/windows/hpi/src/sys_api_md.c
4708
4709static int nonSeekAvailable(int fd, long *pbytes) {
4710  // This is used for available on non-seekable devices
4711  // (like both named and anonymous pipes, such as pipes
4712  //  connected to an exec'd process).
4713  // Standard Input is a special case.
4714  HANDLE han;
4715
4716  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4717    return FALSE;
4718  }
4719
4720  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4721    // PeekNamedPipe fails when at EOF.  In that case we
4722    // simply make *pbytes = 0 which is consistent with the
4723    // behavior we get on Solaris when an fd is at EOF.
4724    // The only alternative is to raise an Exception,
4725    // which isn't really warranted.
4726    //
4727    if (::GetLastError() != ERROR_BROKEN_PIPE) {
4728      return FALSE;
4729    }
4730    *pbytes = 0;
4731  }
4732  return TRUE;
4733}
4734
4735#define MAX_INPUT_EVENTS 2000
4736
4737// This code is a copy of JDK's stdinAvailable
4738// from src/windows/hpi/src/sys_api_md.c
4739
4740static int stdinAvailable(int fd, long *pbytes) {
4741  HANDLE han;
4742  DWORD numEventsRead = 0;  // Number of events read from buffer
4743  DWORD numEvents = 0;      // Number of events in buffer
4744  DWORD i = 0;              // Loop index
4745  DWORD curLength = 0;      // Position marker
4746  DWORD actualLength = 0;   // Number of bytes readable
4747  BOOL error = FALSE;       // Error holder
4748  INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4749
4750  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4751    return FALSE;
4752  }
4753
4754  // Construct an array of input records in the console buffer
4755  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4756  if (error == 0) {
4757    return nonSeekAvailable(fd, pbytes);
4758  }
4759
4760  // lpBuffer must fit into 64K or else PeekConsoleInput fails
4761  if (numEvents > MAX_INPUT_EVENTS) {
4762    numEvents = MAX_INPUT_EVENTS;
4763  }
4764
4765  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4766  if (lpBuffer == NULL) {
4767    return FALSE;
4768  }
4769
4770  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4771  if (error == 0) {
4772    os::free(lpBuffer);
4773    return FALSE;
4774  }
4775
4776  // Examine input records for the number of bytes available
4777  for (i=0; i<numEvents; i++) {
4778    if (lpBuffer[i].EventType == KEY_EVENT) {
4779
4780      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4781                                      &(lpBuffer[i].Event);
4782      if (keyRecord->bKeyDown == TRUE) {
4783        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4784        curLength++;
4785        if (*keyPressed == '\r') {
4786          actualLength = curLength;
4787        }
4788      }
4789    }
4790  }
4791
4792  if (lpBuffer != NULL) {
4793    os::free(lpBuffer);
4794  }
4795
4796  *pbytes = (long) actualLength;
4797  return TRUE;
4798}
4799
4800// Map a block of memory.
4801char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4802                        char *addr, size_t bytes, bool read_only,
4803                        bool allow_exec) {
4804  HANDLE hFile;
4805  char* base;
4806
4807  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4808                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4809  if (hFile == NULL) {
4810    log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4811    return NULL;
4812  }
4813
4814  if (allow_exec) {
4815    // CreateFileMapping/MapViewOfFileEx can't map executable memory
4816    // unless it comes from a PE image (which the shared archive is not.)
4817    // Even VirtualProtect refuses to give execute access to mapped memory
4818    // that was not previously executable.
4819    //
4820    // Instead, stick the executable region in anonymous memory.  Yuck.
4821    // Penalty is that ~4 pages will not be shareable - in the future
4822    // we might consider DLLizing the shared archive with a proper PE
4823    // header so that mapping executable + sharing is possible.
4824
4825    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4826                                PAGE_READWRITE);
4827    if (base == NULL) {
4828      log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4829      CloseHandle(hFile);
4830      return NULL;
4831    }
4832
4833    DWORD bytes_read;
4834    OVERLAPPED overlapped;
4835    overlapped.Offset = (DWORD)file_offset;
4836    overlapped.OffsetHigh = 0;
4837    overlapped.hEvent = NULL;
4838    // ReadFile guarantees that if the return value is true, the requested
4839    // number of bytes were read before returning.
4840    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4841    if (!res) {
4842      log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4843      release_memory(base, bytes);
4844      CloseHandle(hFile);
4845      return NULL;
4846    }
4847  } else {
4848    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4849                                    NULL /* file_name */);
4850    if (hMap == NULL) {
4851      log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4852      CloseHandle(hFile);
4853      return NULL;
4854    }
4855
4856    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4857    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4858                                  (DWORD)bytes, addr);
4859    if (base == NULL) {
4860      log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4861      CloseHandle(hMap);
4862      CloseHandle(hFile);
4863      return NULL;
4864    }
4865
4866    if (CloseHandle(hMap) == 0) {
4867      log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4868      CloseHandle(hFile);
4869      return base;
4870    }
4871  }
4872
4873  if (allow_exec) {
4874    DWORD old_protect;
4875    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4876    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4877
4878    if (!res) {
4879      log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4880      // Don't consider this a hard error, on IA32 even if the
4881      // VirtualProtect fails, we should still be able to execute
4882      CloseHandle(hFile);
4883      return base;
4884    }
4885  }
4886
4887  if (CloseHandle(hFile) == 0) {
4888    log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4889    return base;
4890  }
4891
4892  return base;
4893}
4894
4895
4896// Remap a block of memory.
4897char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4898                          char *addr, size_t bytes, bool read_only,
4899                          bool allow_exec) {
4900  // This OS does not allow existing memory maps to be remapped so we
4901  // have to unmap the memory before we remap it.
4902  if (!os::unmap_memory(addr, bytes)) {
4903    return NULL;
4904  }
4905
4906  // There is a very small theoretical window between the unmap_memory()
4907  // call above and the map_memory() call below where a thread in native
4908  // code may be able to access an address that is no longer mapped.
4909
4910  return os::map_memory(fd, file_name, file_offset, addr, bytes,
4911                        read_only, allow_exec);
4912}
4913
4914
4915// Unmap a block of memory.
4916// Returns true=success, otherwise false.
4917
4918bool os::pd_unmap_memory(char* addr, size_t bytes) {
4919  MEMORY_BASIC_INFORMATION mem_info;
4920  if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4921    log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4922    return false;
4923  }
4924
4925  // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4926  // Instead, executable region was allocated using VirtualAlloc(). See
4927  // pd_map_memory() above.
4928  //
4929  // The following flags should match the 'exec_access' flags used for
4930  // VirtualProtect() in pd_map_memory().
4931  if (mem_info.Protect == PAGE_EXECUTE_READ ||
4932      mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4933    return pd_release_memory(addr, bytes);
4934  }
4935
4936  BOOL result = UnmapViewOfFile(addr);
4937  if (result == 0) {
4938    log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4939    return false;
4940  }
4941  return true;
4942}
4943
4944void os::pause() {
4945  char filename[MAX_PATH];
4946  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4947    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4948  } else {
4949    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4950  }
4951
4952  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4953  if (fd != -1) {
4954    struct stat buf;
4955    ::close(fd);
4956    while (::stat(filename, &buf) == 0) {
4957      Sleep(100);
4958    }
4959  } else {
4960    jio_fprintf(stderr,
4961                "Could not open pause file '%s', continuing immediately.\n", filename);
4962  }
4963}
4964
4965os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
4966  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
4967}
4968
4969// See the caveats for this class in os_windows.hpp
4970// Protects the callback call so that raised OS EXCEPTIONS cause a jump back
4971// into this method, which then returns false. If no OS EXCEPTION was raised,
4972// returns true.
4973// The callback is supposed to provide the method that should be protected.
4974//
4975bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
4976  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
4977  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
4978         "crash_protection already set?");
4979
4980  bool success = true;
4981  __try {
4982    WatcherThread::watcher_thread()->set_crash_protection(this);
4983    cb.call();
4984  } __except(EXCEPTION_EXECUTE_HANDLER) {
4985    // only for protection, nothing to do
4986    success = false;
4987  }
4988  WatcherThread::watcher_thread()->set_crash_protection(NULL);
4989  return success;
4990}
4991
4992// An Event wraps a win32 "CreateEvent" kernel handle.
4993//
4994// We have a number of choices regarding "CreateEvent" win32 handle leakage:
4995//
4996// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
4997//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
4998//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
4999//     In addition, an unpark() operation might fetch the handle field, but the
5000//     event could recycle between the fetch and the SetEvent() operation.
5001//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5002//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5003//     on a stale but recycled handle would be harmless, but in practice this might
5004//     confuse other non-Sun code, so it's not a viable approach.
5005//
5006// 2:  Once a win32 event handle is associated with an Event, it remains associated
5007//     with the Event.  The event handle is never closed.  This could be construed
5008//     as handle leakage, but only up to the maximum # of threads that have been extant
5009//     at any one time.  This shouldn't be an issue, as Windows platforms typically
5010//     permit a process to have hundreds of thousands of open handles.
5011//
5012// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5013//     and release unused handles.
5014//
5015// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5016//     It's not clear, however, that we wouldn't be trading one type of leak for another.
5017//
5018// 5.  Use an RCU-like mechanism (Read-Copy Update).
5019//     Or perhaps something similar to Maged Michael's "Hazard pointers".
5020//
5021// We use (2).
5022//
5023// TODO-FIXME:
5024// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5025// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5026//     to recover from (or at least detect) the dreaded Windows 841176 bug.
5027// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
5028//     into a single win32 CreateEvent() handle.
5029//
5030// Assumption:
5031//    Only one parker can exist on an event, which is why we allocate
5032//    them per-thread. Multiple unparkers can coexist.
5033//
5034// _Event transitions in park()
5035//   -1 => -1 : illegal
5036//    1 =>  0 : pass - return immediately
5037//    0 => -1 : block; then set _Event to 0 before returning
5038//
5039// _Event transitions in unpark()
5040//    0 => 1 : just return
5041//    1 => 1 : just return
5042//   -1 => either 0 or 1; must signal target thread
5043//         That is, we can safely transition _Event from -1 to either
5044//         0 or 1.
5045//
5046// _Event serves as a restricted-range semaphore.
5047//   -1 : thread is blocked, i.e. there is a waiter
5048//    0 : neutral: thread is running or ready,
5049//        could have been signaled after a wait started
5050//    1 : signaled - thread is running or ready
5051//
5052// Another possible encoding of _Event would be with
5053// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5054//
5055
5056int os::PlatformEvent::park(jlong Millis) {
5057  // Transitions for _Event:
5058  //   -1 => -1 : illegal
5059  //    1 =>  0 : pass - return immediately
5060  //    0 => -1 : block; then set _Event to 0 before returning
5061
5062  guarantee(_ParkHandle != NULL , "Invariant");
5063  guarantee(Millis > 0          , "Invariant");
5064
5065  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5066  // the initial park() operation.
5067  // Consider: use atomic decrement instead of CAS-loop
5068
5069  int v;
5070  for (;;) {
5071    v = _Event;
5072    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5073  }
5074  guarantee((v == 0) || (v == 1), "invariant");
5075  if (v != 0) return OS_OK;
5076
5077  // Do this the hard way by blocking ...
5078  // TODO: consider a brief spin here, gated on the success of recent
5079  // spin attempts by this thread.
5080  //
5081  // We decompose long timeouts into series of shorter timed waits.
5082  // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5083  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5084  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5085  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5086  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5087  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5088  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5089  // for the already waited time.  This policy does not admit any new outcomes.
5090  // In the future, however, we might want to track the accumulated wait time and
5091  // adjust Millis accordingly if we encounter a spurious wakeup.
5092
5093  const int MAXTIMEOUT = 0x10000000;
5094  DWORD rv = WAIT_TIMEOUT;
5095  while (_Event < 0 && Millis > 0) {
5096    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5097    if (Millis > MAXTIMEOUT) {
5098      prd = MAXTIMEOUT;
5099    }
5100    rv = ::WaitForSingleObject(_ParkHandle, prd);
5101    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5102    if (rv == WAIT_TIMEOUT) {
5103      Millis -= prd;
5104    }
5105  }
5106  v = _Event;
5107  _Event = 0;
5108  // see comment at end of os::PlatformEvent::park() below:
5109  OrderAccess::fence();
5110  // If we encounter a nearly simultaneous timeout expiry and unpark()
5111  // we return OS_OK indicating we awoke via unpark().
5112  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5113  return (v >= 0) ? OS_OK : OS_TIMEOUT;
5114}
5115
5116void os::PlatformEvent::park() {
5117  // Transitions for _Event:
5118  //   -1 => -1 : illegal
5119  //    1 =>  0 : pass - return immediately
5120  //    0 => -1 : block; then set _Event to 0 before returning
5121
5122  guarantee(_ParkHandle != NULL, "Invariant");
5123  // Invariant: Only the thread associated with the Event/PlatformEvent
5124  // may call park().
5125  // Consider: use atomic decrement instead of CAS-loop
5126  int v;
5127  for (;;) {
5128    v = _Event;
5129    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5130  }
5131  guarantee((v == 0) || (v == 1), "invariant");
5132  if (v != 0) return;
5133
5134  // Do this the hard way by blocking ...
5135  // TODO: consider a brief spin here, gated on the success of recent
5136  // spin attempts by this thread.
5137  while (_Event < 0) {
5138    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5139    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5140  }
5141
5142  // Usually we'll find _Event == 0 at this point, but as
5143  // an optional optimization we clear it, just in case
5144  // multiple unpark() operations drove _Event up to 1.
5145  _Event = 0;
5146  OrderAccess::fence();
5147  guarantee(_Event >= 0, "invariant");
5148}
5149
5150void os::PlatformEvent::unpark() {
5151  guarantee(_ParkHandle != NULL, "Invariant");
5152
5153  // Transitions for _Event:
5154  //    0 => 1 : just return
5155  //    1 => 1 : just return
5156  //   -1 => either 0 or 1; must signal target thread
5157  //         That is, we can safely transition _Event from -1 to either
5158  //         0 or 1.
5159  // See also: "Semaphores in Plan 9" by Mullender & Cox
5160  //
5161  // Note: Forcing a transition from "-1" to "1" on an unpark() means
5162  // that it will take two back-to-back park() calls for the owning
5163  // thread to block. This has the benefit of forcing a spurious return
5164  // from the first park() call after an unpark() call which will help
5165  // shake out uses of park() and unpark() without condition variables.
5166
5167  if (Atomic::xchg(1, &_Event) >= 0) return;
5168
5169  ::SetEvent(_ParkHandle);
5170}
5171
5172
5173// JSR166
5174// -------------------------------------------------------
5175
5176// The Windows implementation of Park is very straightforward: Basic
5177// operations on Win32 Events turn out to have the right semantics to
5178// use them directly. We opportunistically reuse the event inherited
5179// from Monitor.
5180
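// Parker::park() follows the j.u.c.LockSupport conventions for its time
// argument: a relative wait is given in nanoseconds, an absolute deadline is
// given in milliseconds since the epoch, and (time == 0, !isAbsolute) means
// wait indefinitely.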
5181void Parker::park(bool isAbsolute, jlong time) {
5182  guarantee(_ParkEvent != NULL, "invariant");
5183  // First, demultiplex/decode time arguments
5184  if (time < 0) { // don't wait
5185    return;
5186  } else if (time == 0 && !isAbsolute) {
5187    time = INFINITE;
5188  } else if (isAbsolute) {
5189    time -= os::javaTimeMillis(); // convert to relative time
5190    if (time <= 0) {  // already elapsed
5191      return;
5192    }
5193  } else { // relative
5194    time /= 1000000;  // Must coarsen from nanos to millis
5195    if (time == 0) {  // Wait for the minimal time unit if zero
5196      time = 1;
5197    }
5198  }
5199
5200  JavaThread* thread = JavaThread::current();
5201
5202  // Don't wait if interrupted or already triggered
5203  if (Thread::is_interrupted(thread, false) ||
5204      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5205    ResetEvent(_ParkEvent);
5206    return;
5207  } else {
5208    ThreadBlockInVM tbivm(thread);
5209    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5210    thread->set_suspend_equivalent();
5211
5212    WaitForSingleObject(_ParkEvent, time);
5213    ResetEvent(_ParkEvent);
5214
5215    // If externally suspended while waiting, re-suspend
5216    if (thread->handle_special_suspend_equivalent_condition()) {
5217      thread->java_suspend_self();
5218    }
5219  }
5220}
5221
5222void Parker::unpark() {
5223  guarantee(_ParkEvent != NULL, "invariant");
5224  SetEvent(_ParkEvent);
5225}
5226
5227// Run the specified command in a separate process. Return its exit value,
5228// or -1 on failure (e.g. can't create a new process).
5229int os::fork_and_exec(char* cmd) {
5230  STARTUPINFO si;
5231  PROCESS_INFORMATION pi;
5232
5233  memset(&si, 0, sizeof(si));
5234  si.cb = sizeof(si);
5235  memset(&pi, 0, sizeof(pi));
5236  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5237                            cmd,    // command line
5238                            NULL,   // process security attribute
5239                            NULL,   // thread security attribute
5240                            TRUE,   // inherits system handles
5241                            0,      // no creation flags
5242                            NULL,   // use parent's environment block
5243                            NULL,   // use parent's starting directory
5244                            &si,    // (in) startup information
5245                            &pi);   // (out) process information
5246
5247  if (rslt) {
5248    // Wait until child process exits.
5249    WaitForSingleObject(pi.hProcess, INFINITE);
5250
5251    DWORD exit_code;
5252    GetExitCodeProcess(pi.hProcess, &exit_code);
5253
5254    // Close process and thread handles.
5255    CloseHandle(pi.hProcess);
5256    CloseHandle(pi.hThread);
5257
5258    return (int)exit_code;
5259  } else {
5260    return -1;
5261  }
5262}
5263
5264//--------------------------------------------------------------------------------------------------
5265// Non-product code
5266
5267static int mallocDebugIntervalCounter = 0;
5268static int mallocDebugCounter = 0;
5269
5270// For debugging possible bugs inside HeapWalk (entries are saved in a ring buffer)
5271#define SAVE_COUNT 8
5272static PROCESS_HEAP_ENTRY saved_heap_entries[SAVE_COUNT];
5273static int saved_heap_entry_index;
5274
5275bool os::check_heap(bool force) {
5276  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
5277  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
5278    // Note: HeapValidate executes two hardware breakpoints when it finds something
5279    // wrong; at these points, eax contains the address of the offending block (I think).
5280    // To get to the explicit error message(s) below, just continue twice.
5281    //
5282    // Note:  we want to check the CRT heap, which is not necessarily located in the
5283    // process default heap.
5284    HANDLE heap = (HANDLE) _get_heap_handle();
5285    if (!heap) {
5286      return true;
5287    }
5288
5289    // If we fail to lock the heap, then gflags.exe has been used
5290    // or some other special heap flag has been set that prevents
5291    // locking. We don't try to walk a heap we can't lock.
5292    if (HeapLock(heap) != 0) {
5293      PROCESS_HEAP_ENTRY phe;
5294      phe.lpData = NULL;
5295      memset(saved_heap_entries, 0, sizeof(saved_heap_entries));
5296      saved_heap_entry_index = 0;
5297      int count = 0;
5298
5299      while (HeapWalk(heap, &phe) != 0) {
5300        count ++;
5301        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
5302            !HeapValidate(heap, 0, phe.lpData)) {
5303          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
5304          tty->print_cr("corrupted block near address %#x, length %d, count %d", phe.lpData, phe.cbData, count);
5305          HeapUnlock(heap);
5306          fatal("corrupted C heap");
5307        } else {
5308          // Save previously seen entries in a ring buffer. We have seen strange
5309          // heap corruption fatal errors that produced mdmp files, but when we load
5310          // these mdmp files in WinDBG, "!heap -triage" shows no error.
5311          // We can examine the saved_heap_entries[] array in the mdmp file to
5312          // diagnose such seemingly spurious errors reported by HeapWalk.
5313          saved_heap_entries[saved_heap_entry_index++] = phe;
5314          if (saved_heap_entry_index >= SAVE_COUNT) {
5315            saved_heap_entry_index = 0;
5316          }
5317        }
5318      }
5319      DWORD err = GetLastError();
5320      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED &&
5321         (err == ERROR_INVALID_FUNCTION && phe.lpData != NULL)) {
5322        HeapUnlock(heap);
5323        fatal("heap walk aborted with error %d", err);
5324      }
5325      HeapUnlock(heap);
5326    }
5327    mallocDebugIntervalCounter = 0;
5328  }
5329  return true;
5330}
5331
5332
5333bool os::find(address addr, outputStream* st) {
5334  int offset = -1;
5335  bool result = false;
5336  char buf[256];
5337  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5338    st->print(PTR_FORMAT " ", addr);
5339    if (strlen(buf) < sizeof(buf) - 1) {
5340      char* p = strrchr(buf, '\\');
5341      if (p) {
5342        st->print("%s", p + 1);
5343      } else {
5344        st->print("%s", buf);
5345      }
5346    } else {
5347        // The library name is probably truncated. Let's omit the library name.
5348        // See also JDK-8147512.
5349    }
5350    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5351      st->print("::%s + 0x%x", buf, offset);
5352    }
5353    st->cr();
5354    result = true;
5355  }
5356  return result;
5357}
5358
5359LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
5360  DWORD exception_code = e->ExceptionRecord->ExceptionCode;
5361
5362  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
5363    JavaThread* thread = JavaThread::current();
5364    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
5365    address addr = (address) exceptionRecord->ExceptionInformation[1];
5366
5367    if (os::is_memory_serialize_page(thread, addr)) {
5368      return EXCEPTION_CONTINUE_EXECUTION;
5369    }
5370  }
5371
5372  return EXCEPTION_CONTINUE_SEARCH;
5373}
5374
5375// We don't build a headless jre for Windows
5376bool os::is_headless_jre() { return false; }
5377
5378static jint initSock() {
5379  WSADATA wsadata;
5380
5381  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5382    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5383                ::GetLastError());
5384    return JNI_ERR;
5385  }
5386  return JNI_OK;
5387}
5388
5389struct hostent* os::get_host_by_name(char* name) {
5390  return (struct hostent*)gethostbyname(name);
5391}
5392
5393int os::socket_close(int fd) {
5394  return ::closesocket(fd);
5395}
5396
5397int os::socket(int domain, int type, int protocol) {
5398  return ::socket(domain, type, protocol);
5399}
5400
5401int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5402  return ::connect(fd, him, len);
5403}
5404
5405int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5406  return ::recv(fd, buf, (int)nBytes, flags);
5407}
5408
5409int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5410  return ::send(fd, buf, (int)nBytes, flags);
5411}
5412
5413int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5414  return ::send(fd, buf, (int)nBytes, flags);
5415}
5416
5417// WINDOWS CONTEXT Flags for THREAD_SAMPLING
5418#if defined(IA32)
5419  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5420#elif defined (AMD64)
5421  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5422#endif
5423
5424// returns true if thread could be suspended,
5425// false otherwise
5426static bool do_suspend(HANDLE* h) {
5427  if (h != NULL) {
5428    if (SuspendThread(*h) != ~0) {   // SuspendThread returns (DWORD)-1 on failure
5429      return true;
5430    }
5431  }
5432  return false;
5433}
5434
5435// resume the thread
5436// calling resume on an active thread is a no-op
5437static void do_resume(HANDLE* h) {
5438  if (h != NULL) {
5439    ResumeThread(*h);
5440  }
5441}
5442
5443// retrieve a suspend/resume context capable handle
5444// from the tid. Caller validates handle return value.
5445void get_thread_handle_for_extended_context(HANDLE* h,
5446                                            OSThread::thread_id_t tid) {
5447  if (h != NULL) {
5448    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5449  }
5450}
5451
5452// Thread sampling implementation
5453//
5454void os::SuspendedThreadTask::internal_do_task() {
5455  CONTEXT    ctxt;
5456  HANDLE     h = NULL;
5457
5458  // get context capable handle for thread
5459  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5460
5461  // sanity
5462  if (h == NULL || h == INVALID_HANDLE_VALUE) {
5463    return;
5464  }
5465
5466  // suspend the thread
5467  if (do_suspend(&h)) {
5468    ctxt.ContextFlags = sampling_context_flags;
5469    // get thread context
5470    GetThreadContext(h, &ctxt);
5471    SuspendedThreadTaskContext context(_thread, &ctxt);
5472    // pass context to Thread Sampling impl
5473    do_task(context);
5474    // resume thread
5475    do_resume(&h);
5476  }
5477
5478  // close handle
5479  CloseHandle(h);
5480}
5481
5482bool os::start_debugging(char *buf, int buflen) {
5483  int len = (int)strlen(buf);
5484  char *p = &buf[len];
5485
5486  jio_snprintf(p, buflen-len,
5487             "\n\n"
5488             "Do you want to debug the problem?\n\n"
5489             "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5490             "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5491             "Otherwise, select 'No' to abort...",
5492             os::current_process_id(), os::current_thread_id());
5493
5494  bool yes = os::message_box("Unexpected Error", buf);
5495
5496  if (yes) {
5497    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5498    // exception. If VM is running inside a debugger, the debugger will
5499    // catch the exception. Otherwise, the breakpoint exception will reach
5500    // the default windows exception handler, which can spawn a debugger and
5501    // automatically attach to the dying VM.
5502    os::breakpoint();
5503    yes = false;
5504  }
5505  return yes;
5506}
5507
5508void* os::get_default_process_handle() {
5509  return (void*)GetModuleHandle(NULL);
5510}
5511
5512// Builds a platform dependent Agent_OnLoad_<lib_name> function name
5513// which is used to find statically linked in agents.
5514// Additionally for windows, takes into account __stdcall names.
5515// Parameters:
5516//            sym_name: Symbol in library we are looking for
5517//            lib_name: Name of library to look in, NULL for shared libs.
5518//            is_absolute_path == true if lib_name is absolute path to agent
5519//                                     such as "C:/a/b/L.dll"
5520//            == false if only the base name of the library is passed in
5521//               such as "L"
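// For example (illustrative values only):
//   ("Agent_OnLoad",    "L",              false) -> "Agent_OnLoad_L"
//   ("_Agent_OnLoad@8", "C:\a\b\L.dll",   true)  -> "_Agent_OnLoad_L@8"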
5522char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5523                                    bool is_absolute_path) {
5524  char *agent_entry_name;
5525  size_t len;
5526  size_t name_len;
5527  size_t prefix_len = strlen(JNI_LIB_PREFIX);
5528  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5529  const char *start;
5530
5531  if (lib_name != NULL) {
5532    len = name_len = strlen(lib_name);
5533    if (is_absolute_path) {
5534      // Need to strip path, prefix and suffix
5535      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5536        lib_name = ++start;
5537      } else {
5538        // Need to check for drive prefix
5539        if ((start = strchr(lib_name, ':')) != NULL) {
5540          lib_name = ++start;
5541        }
5542      }
5543      if (len <= (prefix_len + suffix_len)) {
5544        return NULL;
5545      }
5546      lib_name += prefix_len;
5547      name_len = strlen(lib_name) - suffix_len;
5548    }
5549  }
5550  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5551  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5552  if (agent_entry_name == NULL) {
5553    return NULL;
5554  }
5555  if (lib_name != NULL) {
5556    const char *p = strrchr(sym_name, '@');
5557    if (p != NULL && p != sym_name) {
5558      // sym_name == _Agent_OnLoad@XX
5559      strncpy(agent_entry_name, sym_name, (p - sym_name));
5560      agent_entry_name[(p-sym_name)] = '\0';
5561      // agent_entry_name == _Agent_OnLoad
5562      strcat(agent_entry_name, "_");
5563      strncat(agent_entry_name, lib_name, name_len);
5564      strcat(agent_entry_name, p);
5565      // agent_entry_name == _Agent_OnLoad_lib_name@XX
5566    } else {
5567      strcpy(agent_entry_name, sym_name);
5568      strcat(agent_entry_name, "_");
5569      strncat(agent_entry_name, lib_name, name_len);
5570    }
5571  } else {
5572    strcpy(agent_entry_name, sym_name);
5573  }
5574  return agent_entry_name;
5575}
5576
5577#ifndef PRODUCT
5578
5579// test the code path in reserve_memory_special() that tries to allocate memory in a single
5580// contiguous memory block at a particular address.
5581// The test first tries to find a good approximate address to allocate at by using the same
5582// method to allocate some memory at any address. The test then tries to allocate memory in
5583// the vicinity (not directly after it, to avoid possible by-chance use of that location).
5584// This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5585// the previously allocated memory is available for allocation. The only actual failure
5586// that is reported is when the test tries to allocate at a particular location but gets a
5587// different valid one. A NULL return value at this point is not considered an error but may
5588// be legitimate.
5589// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5590void TestReserveMemorySpecial_test() {
5591  if (!UseLargePages) {
5592    if (VerboseInternalVMTests) {
5593      tty->print("Skipping test because large pages are disabled");
5594    }
5595    return;
5596  }
5597  // save current value of globals
5598  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5599  bool old_use_numa_interleaving = UseNUMAInterleaving;
5600
5601  // set globals to make sure we hit the correct code path
5602  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5603
5604  // do an allocation at an address selected by the OS to get a good one.
5605  const size_t large_allocation_size = os::large_page_size() * 4;
5606  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5607  if (result == NULL) {
5608    if (VerboseInternalVMTests) {
5609      tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
5610                          large_allocation_size);
5611    }
5612  } else {
5613    os::release_memory_special(result, large_allocation_size);
5614
5615    // allocate another page within the recently allocated memory area which seems to be a good location. At least
5616    // we managed to get it once.
5617    const size_t expected_allocation_size = os::large_page_size();
5618    char* expected_location = result + os::large_page_size();
5619    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5620    if (actual_location == NULL) {
5621      if (VerboseInternalVMTests) {
5622        tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
5623                            expected_location, large_allocation_size);
5624      }
5625    } else {
5626      // release memory
5627      os::release_memory_special(actual_location, expected_allocation_size);
5628      // only now check, after releasing any memory to avoid any leaks.
5629      assert(actual_location == expected_location,
5630             "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5631             expected_location, expected_allocation_size, actual_location);
5632    }
5633  }
5634
5635  // restore globals
5636  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5637  UseNUMAInterleaving = old_use_numa_interleaving;
5638}
5639#endif // PRODUCT
5640
5641/*
5642  All the defined signal names for Windows.
5643
5644  NOTE that not all of these names are accepted by FindSignal!
5645
5646  For various reasons some of these may be rejected at runtime.
5647
5648  Here are the names currently accepted by a user of sun.misc.Signal with
5649  1.4.1 (ignoring potential interaction with use of chaining, etc):
5650
5651     (LIST TBD)
5652
5653*/
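// For example, get_signal_number("TERM") returns SIGTERM; an unrecognized
// name such as "HUP" returns -1 on Windows.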
5654int os::get_signal_number(const char* name) {
5655  static const struct {
5656    const char* name;
5657    int   number;
5658  } siglabels [] =
5659    // derived from version 6.0 VC98/include/signal.h
5660  {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5661  "FPE",        SIGFPE,         // floating point exception
5662  "SEGV",       SIGSEGV,        // segment violation
5663  "INT",        SIGINT,         // interrupt
5664  "TERM",       SIGTERM,        // software term signal from kill
5665  "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5666  "ILL",        SIGILL};        // illegal instruction
5667  for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5668    if (strcmp(name, siglabels[i].name) == 0) {
5669      return siglabels[i].number;
5670    }
5671  }
5672  return -1;
5673}
5674
5675// Fast current thread access
5676
5677int os::win32::_thread_ptr_offset = 0;
5678
5679static void call_wrapper_dummy() {}
5680
5681// We need to call the os_exception_wrapper once so that it sets
5682// up the offset from FS of the thread pointer.
5683void os::win32::initialize_thread_ptr_offset() {
5684  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5685                           NULL, NULL, NULL, NULL);
5686}
5687