os.hpp revision 1601:126ea7725993
1/*
2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25// os defines the interface to operating system; this includes traditional
26// OS services (time, I/O) as well as other functionality with system-
27// dependent code.
28
// Generic signature for an entry point obtained from a dynamically loaded
// library (see os::dll_lookup below).  The varargs form deliberately erases
// the real parameter list; callers cast to the correct function type.
typedef void (*dll_func)(...);

// Forward declarations so this header does not pull in the full definitions.
class Thread;
class JavaThread;
class Event;
class DLL;
class FileHandle;
template<class E> class GrowableArray;
37
38// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
39
// Platform-independent error return values from OS functions.
// These are the canonical return codes for os:: routines such as
// set_priority()/get_priority(); 0 means success, negative means failure.
// NOTE(review): -4 is skipped; presumably reserved historically -- confirm
// against platform code before assigning it a new meaning.
enum OSReturn {
  OS_OK         =  0,        // Operation was successful
  OS_ERR        = -1,        // Operation failed
  OS_INTRPT     = -2,        // Operation was interrupted
  OS_TIMEOUT    = -3,        // Operation timed out
  OS_NOMEM      = -5,        // Operation failed for lack of memory
  OS_NORESOURCE = -6         // Operation failed for lack of nonmemory resource
};
49
// Java thread priorities, mirroring the Java Language Specification.
// MaxPriority also bounds the os::java_to_os_priority[] mapping table
// declared later in this file (size MaxPriority + 1), so these values
// must stay dense and MaxPriority must remain the largest.
enum ThreadPriority {        // JLS 20.20.1-3
  NoPriority       = -1,     // Initial non-priority value
  MinPriority      =  1,     // Minimum priority
  NormPriority     =  5,     // Normal (non-daemon) priority
  NearMaxPriority  =  9,     // High priority, used for VMThread
  MaxPriority      = 10      // Highest priority, used for WatcherThread
                             // ensures that VMThread doesn't starve profiler
};
58
// Typedef for structured exception handling support.
// This is the callback type consumed by os::os_exception_wrapper() below,
// which invokes the Java call through this pointer (platform code may wrap
// the invocation in an OS-specific exception guard).
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
61
62class os: AllStatic {
63 public:
64  enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
65
66 private:
67  static OSThread*          _starting_thread;
68  static address            _polling_page;
69  static volatile int32_t * _mem_serialize_page;
70  static uintptr_t          _serialize_page_mask;
71 public:
72  static size_t             _page_sizes[page_sizes_max];
73
74 private:
75  static void init_page_sizes(size_t default_page_size) {
76    _page_sizes[0] = default_page_size;
77    _page_sizes[1] = 0; // sentinel
78  }
79
80 public:
81
82  static void init(void);                      // Called before command line parsing
83  static jint init_2(void);                    // Called after command line parsing
84  static void init_3(void);                    // Called at the end of vm init
85
86  // File names are case-insensitive on windows only
87  // Override me as needed
88  static int    file_name_strcmp(const char* s1, const char* s2);
89
90  static bool getenv(const char* name, char* buffer, int len);
91  static bool have_special_privileges();
92
93  static jlong  javaTimeMillis();
94  static jlong  javaTimeNanos();
95  static void   javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
96  static void   run_periodic_checks();
97
98
99  // Returns the elapsed time in seconds since the vm started.
100  static double elapsedTime();
101
102  // Returns real time in seconds since an arbitrary point
103  // in the past.
104  static bool getTimesSecs(double* process_real_time,
105                           double* process_user_time,
106                           double* process_system_time);
107
108  // Interface to the performance counter
109  static jlong elapsed_counter();
110  static jlong elapsed_frequency();
111
112  // The "virtual time" of a thread is the amount of time a thread has
113  // actually run.  The first function indicates whether the OS supports
114  // this functionality for the current thread, and if so:
115  //   * the second enables vtime tracking (if that is required).
116  //   * the third tells whether vtime is enabled.
117  //   * the fourth returns the elapsed virtual time for the current
118  //     thread.
119  static bool supports_vtime();
120  static bool enable_vtime();
121  static bool vtime_enabled();
122  static double elapsedVTime();
123
124  // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
125  // It is MT safe, but not async-safe, as reading time zone
126  // information may require a lock on some platforms.
127  static char*      local_time_string(char *buf, size_t buflen);
128  static struct tm* localtime_pd     (const time_t* clock, struct tm*  res);
129  // Fill in buffer with current local time as an ISO-8601 string.
130  // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
131  // Returns buffer, or NULL if it failed.
132  static char* iso8601_time(char* buffer, size_t buffer_length);
133
134  // Interface for detecting multiprocessor system
135  static inline bool is_MP() {
136    assert(_processor_count > 0, "invalid processor count");
137    return _processor_count > 1;
138  }
139  static julong available_memory();
140  static julong physical_memory();
141  static julong allocatable_physical_memory(julong size);
142  static bool is_server_class_machine();
143
144  // number of CPUs
145  static int processor_count() {
146    return _processor_count;
147  }
148  static void set_processor_count(int count) { _processor_count = count; }
149
150  // Returns the number of CPUs this process is currently allowed to run on.
151  // Note that on some OSes this can change dynamically.
152  static int active_processor_count();
153
154  // Bind processes to processors.
155  //     This is a two step procedure:
156  //     first you generate a distribution of processes to processors,
157  //     then you bind processes according to that distribution.
158  // Compute a distribution for number of processes to processors.
159  //    Stores the processor id's into the distribution array argument.
160  //    Returns true if it worked, false if it didn't.
161  static bool distribute_processes(uint length, uint* distribution);
162  // Binds the current process to a processor.
163  //    Returns true if it worked, false if it didn't.
164  static bool bind_to_processor(uint processor_id);
165
166  // Interface for stack banging (predetect possible stack overflow for
167  // exception processing)  There are guard pages, and above that shadow
168  // pages for stack overflow checking.
169  static bool uses_stack_guard_pages();
170  static bool allocate_stack_guard_pages();
171  static void bang_stack_shadow_pages();
172  static bool stack_shadow_pages_available(Thread *thread, methodHandle method);
173
174  // OS interface to Virtual Memory
175
176  // Return the default page size.
177  static int    vm_page_size();
178
179  // Return the page size to use for a region of memory.  The min_pages argument
180  // is a hint intended to limit fragmentation; it says the returned page size
181  // should be <= region_max_size / min_pages.  Because min_pages is a hint,
182  // this routine may return a size larger than region_max_size / min_pages.
183  //
184  // The current implementation ignores min_pages if a larger page size is an
185  // exact multiple of both region_min_size and region_max_size.  This allows
186  // larger pages to be used when doing so would not cause fragmentation; in
187  // particular, a single page can be used when region_min_size ==
188  // region_max_size == a supported page size.
189  static size_t page_size_for_region(size_t region_min_size,
190                                     size_t region_max_size,
191                                     uint min_pages);
192
193  // Method for tracing page sizes returned by the above method; enabled by
194  // TracePageSizes.  The region_{min,max}_size parameters should be the values
195  // passed to page_size_for_region() and page_size should be the result of that
196  // call.  The (optional) base and size parameters should come from the
197  // ReservedSpace base() and size() methods.
198  static void trace_page_sizes(const char* str, const size_t region_min_size,
199                               const size_t region_max_size,
200                               const size_t page_size,
201                               const char* base = NULL,
202                               const size_t size = 0) PRODUCT_RETURN;
203
204  static int    vm_allocation_granularity();
205  static char*  reserve_memory(size_t bytes, char* addr = 0,
206                               size_t alignment_hint = 0);
207  static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
208  static void   split_reserved_memory(char *base, size_t size,
209                                      size_t split, bool realloc);
210  static bool   commit_memory(char* addr, size_t bytes,
211                              bool executable = false);
212  static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
213                              bool executable = false);
214  static bool   uncommit_memory(char* addr, size_t bytes);
215  static bool   release_memory(char* addr, size_t bytes);
216
217  enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
218  static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
219                               bool is_committed = true);
220
221  static bool   guard_memory(char* addr, size_t bytes);
222  static bool   unguard_memory(char* addr, size_t bytes);
223  static bool   create_stack_guard_pages(char* addr, size_t bytes);
224  static bool   remove_stack_guard_pages(char* addr, size_t bytes);
225
226  static char*  map_memory(int fd, const char* file_name, size_t file_offset,
227                           char *addr, size_t bytes, bool read_only = false,
228                           bool allow_exec = false);
229  static char*  remap_memory(int fd, const char* file_name, size_t file_offset,
230                             char *addr, size_t bytes, bool read_only,
231                             bool allow_exec);
232  static bool   unmap_memory(char *addr, size_t bytes);
233  static void   free_memory(char *addr, size_t bytes);
234  static void   realign_memory(char *addr, size_t bytes, size_t alignment_hint);
235
236  // NUMA-specific interface
237  static bool   numa_has_static_binding();
238  static bool   numa_has_group_homing();
239  static void   numa_make_local(char *addr, size_t bytes, int lgrp_hint);
240  static void   numa_make_global(char *addr, size_t bytes);
241  static size_t numa_get_groups_num();
242  static size_t numa_get_leaf_groups(int *ids, size_t size);
243  static bool   numa_topology_changed();
244  static int    numa_get_group_id();
245
246  // Page manipulation
247  struct page_info {
248    size_t size;
249    int lgrp_id;
250  };
251  static bool   get_page_info(char *start, page_info* info);
252  static char*  scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found);
253
254  static char*  non_memory_address_word();
255  // reserve, commit and pin the entire memory region
256  static char*  reserve_memory_special(size_t size, char* addr = NULL,
257                bool executable = false);
258  static bool   release_memory_special(char* addr, size_t bytes);
259  static bool   large_page_init();
260  static size_t large_page_size();
261  static bool   can_commit_large_page_memory();
262  static bool   can_execute_large_page_memory();
263
264  // OS interface to polling page
265  static address get_polling_page()             { return _polling_page; }
266  static void    set_polling_page(address page) { _polling_page = page; }
267  static bool    is_poll_address(address addr)  { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); }
268  static void    make_polling_page_unreadable();
269  static void    make_polling_page_readable();
270
271  // Routines used to serialize the thread state without using membars
272  static void    serialize_thread_states();
273
274  // Since we write to the serialize page from every thread, we
275  // want stores to be on unique cache lines whenever possible
276  // in order to minimize CPU cross talk.  We pre-compute the
277  // amount to shift the thread* to make this offset unique to
278  // each thread.
279  static int     get_serialize_page_shift_count() {
280    return SerializePageShiftCount;
281  }
282
283  static void     set_serialize_page_mask(uintptr_t mask) {
284    _serialize_page_mask = mask;
285  }
286
287  static unsigned int  get_serialize_page_mask() {
288    return _serialize_page_mask;
289  }
290
291  static void    set_memory_serialize_page(address page);
292
293  static address get_memory_serialize_page() {
294    return (address)_mem_serialize_page;
295  }
296
297  static inline void write_memory_serialize_page(JavaThread *thread) {
298    uintptr_t page_offset = ((uintptr_t)thread >>
299                            get_serialize_page_shift_count()) &
300                            get_serialize_page_mask();
301    *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
302  }
303
304  static bool    is_memory_serialize_page(JavaThread *thread, address addr) {
305    if (UseMembar) return false;
306    // Previously this function calculated the exact address of this
307    // thread's serialize page, and checked if the faulting address
308    // was equal.  However, some platforms mask off faulting addresses
309    // to the page size, so now we just check that the address is
310    // within the page.  This makes the thread argument unnecessary,
311    // but we retain the NULL check to preserve existing behaviour.
312    if (thread == NULL) return false;
313    address page = (address) _mem_serialize_page;
314    return addr >= page && addr < (page + os::vm_page_size());
315  }
316
317  static void block_on_serialize_page_trap();
318
319  // threads
320
321  enum ThreadType {
322    vm_thread,
323    cgc_thread,        // Concurrent GC thread
324    pgc_thread,        // Parallel GC thread
325    java_thread,
326    compiler_thread,
327    watcher_thread,
328    os_thread
329  };
330
331  static bool create_thread(Thread* thread,
332                            ThreadType thr_type,
333                            size_t stack_size = 0);
334  static bool create_main_thread(JavaThread* thread);
335  static bool create_attached_thread(JavaThread* thread);
336  static void pd_start_thread(Thread* thread);
337  static void start_thread(Thread* thread);
338
339  static void initialize_thread();
340  static void free_thread(OSThread* osthread);
341
342  // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit
343  static intx current_thread_id();
344  static int current_process_id();
345  // hpi::read for calls from non native state
346  // For performance, hpi::read is only callable from _thread_in_native
347  static size_t read(int fd, void *buf, unsigned int nBytes);
348  static int sleep(Thread* thread, jlong ms, bool interruptable);
349  static int naked_sleep();
350  static void infinite_sleep(); // never returns, use with CAUTION
351  static void yield();        // Yields to all threads with same priority
352  enum YieldResult {
353    YIELD_SWITCHED = 1,         // caller descheduled, other ready threads exist & ran
354    YIELD_NONEREADY = 0,        // No other runnable/ready threads.
355                                // platform-specific yield return immediately
356    YIELD_UNKNOWN = -1          // Unknown: platform doesn't support _SWITCHED or _NONEREADY
357    // YIELD_SWITCHED and YIELD_NONREADY imply the platform supports a "strong"
358    // yield that can be used in lieu of blocking.
359  } ;
360  static YieldResult NakedYield () ;
361  static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
362  static void loop_breaker(int attempts);  // called from within tight loops to possibly influence time-sharing
363  static OSReturn set_priority(Thread* thread, ThreadPriority priority);
364  static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
365
366  static void interrupt(Thread* thread);
367  static bool is_interrupted(Thread* thread, bool clear_interrupted);
368
369  static int pd_self_suspend_thread(Thread* thread);
370
371  static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
372  static frame      fetch_frame_from_context(void* ucVoid);
373
374  static ExtendedPC get_thread_pc(Thread *thread);
375  static void breakpoint();
376
377  static address current_stack_pointer();
378  static address current_stack_base();
379  static size_t current_stack_size();
380
381  static int message_box(const char* title, const char* message);
382  static char* do_you_want_to_debug(const char* message);
383
384  // run cmd in a separate process and return its exit code; or -1 on failures
385  static int fork_and_exec(char *cmd);
386
387  // Set file to send error reports.
388  static void set_error_file(const char *logfile);
389
390  // os::exit() is merged with vm_exit()
391  // static void exit(int num);
392
393  // Terminate the VM, but don't exit the process
394  static void shutdown();
395
396  // Terminate with an error.  Default is to generate a core file on platforms
397  // that support such things.  This calls shutdown() and then aborts.
398  static void abort(bool dump_core = true);
399
400  // Die immediately, no exit hook, no abort hook, no cleanup.
401  static void die();
402
403  // Reading directories.
404  static DIR*           opendir(const char* dirname);
405  static int            readdir_buf_size(const char *path);
406  static struct dirent* readdir(DIR* dirp, dirent* dbuf);
407  static int            closedir(DIR* dirp);
408
409  // Dynamic library extension
410  static const char*    dll_file_extension();
411
412  static const char*    get_temp_directory();
413  static const char*    get_current_directory(char *buf, int buflen);
414
415  // Builds a platform-specific full library path given a ld path and lib name
416  static void           dll_build_name(char* buffer, size_t size,
417                                       const char* pathname, const char* fname);
418
419  // Symbol lookup, find nearest function name; basically it implements
420  // dladdr() for all platforms. Name of the nearest function is copied
421  // to buf. Distance from its base address is returned as offset.
422  // If function name is not found, buf[0] is set to '\0' and offset is
423  // set to -1.
424  static bool dll_address_to_function_name(address addr, char* buf,
425                                           int buflen, int* offset);
426
427  // Locate DLL/DSO. On success, full path of the library is copied to
428  // buf, and offset is set to be the distance between addr and the
429  // library's base address. On failure, buf[0] is set to '\0' and
430  // offset is set to -1.
431  static bool dll_address_to_library_name(address addr, char* buf,
432                                          int buflen, int* offset);
433
434  // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
435  static bool address_is_in_vm(address addr);
436
437  // Loads .dll/.so and
438  // in case of error it checks if .dll/.so was built for the
439  // same architecture as Hotspot is running on
440  static void* dll_load(const char *name, char *ebuf, int ebuflen);
441
442  // lookup symbol in a shared library
443  static void* dll_lookup(void* handle, const char* name);
444
445  // Print out system information; they are called by fatal error handler.
446  // Output format may be different on different platforms.
447  static void print_os_info(outputStream* st);
448  static void print_cpu_info(outputStream* st);
449  static void print_memory_info(outputStream* st);
450  static void print_dll_info(outputStream* st);
451  static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
452  static void print_context(outputStream* st, void* context);
453  static void print_siginfo(outputStream* st, void* siginfo);
454  static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
455  static void print_date_and_time(outputStream* st);
456
457  static void print_location(outputStream* st, intptr_t x, bool print_pc = false);
458
459  // The following two functions are used by fatal error handler to trace
460  // native (C) frames. They are not part of frame.hpp/frame.cpp because
461  // frame.hpp/cpp assume thread is JavaThread, and also because different
462  // OS/compiler may have different convention or provide different API to
463  // walk C frames.
464  //
465  // We don't attempt to become a debugger, so we only follow frames if that
466  // does not require a lookup in the unwind table, which is part of the binary
467  // file but may be unsafe to read after a fatal error. So on x86, we can
468  // only walk stack if %ebp is used as frame pointer; on ia64, it's not
469  // possible to walk C stack without having the unwind table.
470  static bool is_first_C_frame(frame *fr);
471  static frame get_sender_for_C_frame(frame *fr);
472
473  // return current frame. pc() and sp() are set to NULL on failure.
474  static frame      current_frame();
475
476  static void print_hex_dump(outputStream* st, address start, address end, int unitsize);
477
478  // returns a string to describe the exception/signal;
479  // returns NULL if exception_code is not an OS exception/signal.
480  static const char* exception_name(int exception_code, char* buf, size_t buflen);
481
482  // Returns native Java library, loads if necessary
483  static void*    native_java_library();
484
485  // Fills in path to jvm.dll/libjvm.so (this info used to find hpi).
486  static void     jvm_path(char *buf, jint buflen);
487
488  // Returns true if we are running in a headless jre.
489  static bool     is_headless_jre();
490
491  // JNI names
492  static void     print_jni_name_prefix_on(outputStream* st, int args_size);
493  static void     print_jni_name_suffix_on(outputStream* st, int args_size);
494
495  // File conventions
496  static const char* file_separator();
497  static const char* line_separator();
498  static const char* path_separator();
499
500  // Init os specific system properties values
501  static void init_system_properties_values();
502
503  // IO operations, non-JVM_ version.
504  static int stat(const char* path, struct stat* sbuf);
505  static bool dir_is_empty(const char* path);
506
507  // IO operations on binary files
508  static int create_binary_file(const char* path, bool rewrite_existing);
509  static jlong current_file_offset(int fd);
510  static jlong seek_to_file_offset(int fd, jlong offset);
511
512  // Thread Local Storage
513  static int   allocate_thread_local_storage();
514  static void  thread_local_storage_at_put(int index, void* value);
515  static void* thread_local_storage_at(int index);
516  static void  free_thread_local_storage(int index);
517
518  // General allocation (must be MT-safe)
519  static void* malloc  (size_t size);
520  static void* realloc (void *memblock, size_t size);
521  static void  free    (void *memblock);
522  static bool  check_heap(bool force = false);      // verify C heap integrity
523  static char* strdup(const char *);  // Like strdup
524
525#ifndef PRODUCT
526  static int  num_mallocs;            // # of calls to malloc/realloc
527  static size_t  alloc_bytes;         // # of bytes allocated
528  static int  num_frees;              // # of calls to free
529#endif
530
531  // Printing 64 bit integers
532  static const char* jlong_format_specifier();
533  static const char* julong_format_specifier();
534
535  // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
536  static void  signal_init();
537  static void  signal_init_pd();
538  static void  signal_notify(int signal_number);
539  static void* signal(int signal_number, void* handler);
540  static void  signal_raise(int signal_number);
541  static int   signal_wait();
542  static int   signal_lookup();
543  static void* user_handler();
544  static void  terminate_signal_thread();
545  static int   sigexitnum_pd();
546
547  // random number generation
548  static long random();                    // return 32bit pseudorandom number
549  static void init_random(long initval);   // initialize random sequence
550
551  // Structured OS Exception support
552  static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
553
554  // JVMTI & JVM monitoring and management support
555  // The thread_cpu_time() and current_thread_cpu_time() are only
556  // supported if is_thread_cpu_time_supported() returns true.
557  // They are not supported on Solaris T1.
558
559  // Thread CPU Time - return the fast estimate on a platform
560  // On Solaris - call gethrvtime (fast) - user time only
561  // On Linux   - fast clock_gettime where available - user+sys
562  //            - otherwise: very slow /proc fs - user+sys
563  // On Windows - GetThreadTimes - user+sys
564  static jlong current_thread_cpu_time();
565  static jlong thread_cpu_time(Thread* t);
566
567  // Thread CPU Time with user_sys_cpu_time parameter.
568  //
569  // If user_sys_cpu_time is true, user+sys time is returned.
570  // Otherwise, only user time is returned
571  static jlong current_thread_cpu_time(bool user_sys_cpu_time);
572  static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);
573
574  // Return a bunch of info about the timers.
575  // Note that the returned info for these two functions may be different
576  // on some platforms
577  static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
578  static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
579
580  static bool is_thread_cpu_time_supported();
581
582  // System loadavg support.  Returns -1 if load average cannot be obtained.
583  static int loadavg(double loadavg[], int nelem);
584
585  // Hook for os specific jvm options that we don't want to abort on seeing
586  static bool obsolete_option(const JavaVMOption *option);
587
588  // Platform dependent stuff
589  #include "incls/_os_pd.hpp.incl"
590
591  // debugging support (mostly used by debug.cpp but also fatal error handler)
592  static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address
593
594  static bool dont_yield();                     // when true, JVM_Yield() is nop
595  static void print_statistics();
596
597  // Thread priority helpers (implemented in OS-specific part)
598  static OSReturn set_native_priority(Thread* thread, int native_prio);
599  static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
600  static int java_to_os_priority[MaxPriority + 1];
601  // Hint to the underlying OS that a task switch would not be good.
602  // Void return because it's a hint and can fail.
603  static void hint_no_preempt();
604
605  // Used at creation if requested by the diagnostic flag PauseAtStartup.
606  // Causes the VM to wait until an external stimulus has been applied
607  // (for Unix, that stimulus is a signal, for Windows, an external
608  // ResumeThread call)
609  static void pause();
610
611 protected:
612  static long _rand_seed;                   // seed for random number generator
613  static int _processor_count;              // number of processors
614
615  static char* format_boot_path(const char* format_string,
616                                const char* home,
617                                int home_len,
618                                char fileSep,
619                                char pathSep);
620  static bool set_boot_path(char fileSep, char pathSep);
621  static char** split_path(const char* path, int* n);
622};
623
// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.

// Spin-wait hint for busy loops; C linkage so platform assembly can
// provide the definition.
extern "C" int SpinPause () ;
// NOTE(review): SafeFetch32/SafeFetchN appear to be fault-tolerant loads
// that yield errValue when *adr is unreadable -- defined in platform code;
// confirm semantics there before relying on them.
extern "C" int SafeFetch32 (int * adr, int errValue) ;
extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ;
632