os.hpp revision 6856:5217fa82f1a4
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_OS_HPP
#define SHARE_VM_RUNTIME_OS_HPP

#include "jvmtifiles/jvmti.h"
#include "runtime/extendedPC.hpp"
#include "runtime/handles.hpp"
#include "utilities/top.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "jvm_linux.h"
# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "jvm_solaris.h"
# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "jvm_windows.h"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "jvm_aix.h"
# include <setjmp.h>
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "jvm_bsd.h"
# include <setjmp.h>
# ifdef __APPLE__
#  include <mach/mach_time.h>
# endif
#endif

class AgentLibrary;

// os defines the interface to the operating system; this includes traditional
// OS services (time, I/O) as well as other functionality with system-
// dependent code.

typedef void (*dll_func)(...);

class Thread;
class JavaThread;
class Event;
class DLL;
class FileHandle;
class NativeCallStack;

template<class E> class GrowableArray;

// %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose

// Platform-independent error return values from OS functions
enum OSReturn {
  OS_OK         =  0,        // Operation was successful
  OS_ERR        = -1,        // Operation failed
  OS_INTRPT     = -2,        // Operation was interrupted
  OS_TIMEOUT    = -3,        // Operation timed out
  OS_NOMEM      = -5,        // Operation failed for lack of memory
  OS_NORESOURCE = -6         // Operation failed for lack of nonmemory resource
};

enum ThreadPriority {        // JLS 20.20.1-3
  NoPriority       = -1,     // Initial non-priority value
  MinPriority      =  1,     // Minimum priority
  NormPriority     =  5,     // Normal (non-daemon) priority
  NearMaxPriority  =  9,     // High priority, used for VMThread
  MaxPriority      = 10,     // Highest priority, used for WatcherThread
                             // ensures that VMThread doesn't starve profiler
  CriticalPriority = 11      // Critical thread priority
};

// Executable parameter flag for os::commit_memory() and
// os::commit_memory_or_exit().
const bool ExecMem = true;

// Typedef for structured exception handling support
typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

class MallocTracker;

class os: AllStatic {
  friend class VMStructs;
  friend class MallocTracker;
 public:
  enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)

 private:
  static OSThread*          _starting_thread;
  static address            _polling_page;
  static volatile int32_t * _mem_serialize_page;
  static uintptr_t          _serialize_page_mask;
 public:
  static size_t             _page_sizes[page_sizes_max];

 private:
  static void init_page_sizes(size_t default_page_size) {
    _page_sizes[0] = default_page_size;
    _page_sizes[1] = 0; // sentinel
  }

  static char*  pd_reserve_memory(size_t bytes, char* addr = 0,
                               size_t alignment_hint = 0);
  static char*  pd_attempt_reserve_memory_at(size_t bytes, char* addr);
  static void   pd_split_reserved_memory(char *base, size_t size,
                                      size_t split, bool realloc);
  static bool   pd_commit_memory(char* addr, size_t bytes, bool executable);
  static bool   pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                                 bool executable);
  // Same as pd_commit_memory() except that it either succeeds or calls
  // vm_exit_out_of_memory() with the specified mesg.
  static void   pd_commit_memory_or_exit(char* addr, size_t bytes,
                                         bool executable, const char* mesg);
  static void   pd_commit_memory_or_exit(char* addr, size_t size,
                                         size_t alignment_hint,
                                         bool executable, const char* mesg);
  static bool   pd_uncommit_memory(char* addr, size_t bytes);
  static bool   pd_release_memory(char* addr, size_t bytes);

  static char*  pd_map_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only = false,
                           bool allow_exec = false);
  static char*  pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                             char *addr, size_t bytes, bool read_only,
                             bool allow_exec);
  static bool   pd_unmap_memory(char *addr, size_t bytes);
  static void   pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void   pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);


 public:
  static void init(void);                      // Called before command line parsing
  static void init_before_ergo(void);          // Called after command line parsing
                                               // before VM ergonomics processing.
  static jint init_2(void);                    // Called after command line parsing
                                               // and VM ergonomics processing
  static void init_globals(void) {             // Called from init_globals() in init.cpp
    init_globals_ext();
  }
  static void init_3(void);                    // Called at the end of vm init

  // File names are case-insensitive on Windows only
  // Override me as needed
  static int    file_name_strcmp(const char* s1, const char* s2);

  // get/unset environment variable
  static bool getenv(const char* name, char* buffer, int len);
  static bool unsetenv(const char* name);

  static bool have_special_privileges();

  static jlong  javaTimeMillis();
  static jlong  javaTimeNanos();
  static void   javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
  static void   run_periodic_checks();
  static bool   supports_monotonic_clock();


  // Returns the elapsed time in seconds since the VM started.
  static double elapsedTime();

  // Returns real time in seconds since an arbitrary point
  // in the past.
  static bool getTimesSecs(double* process_real_time,
                           double* process_user_time,
                           double* process_system_time);

  // Interface to the performance counter
  static jlong elapsed_counter();
  static jlong elapsed_frequency();

  // The "virtual time" of a thread is the amount of time a thread has
  // actually run.  The first function indicates whether the OS supports
  // this functionality for the current thread, and if so:
  //   * the second enables vtime tracking (if that is required).
  //   * the third tells whether vtime is enabled.
  //   * the fourth returns the elapsed virtual time for the current
  //     thread.
  static bool supports_vtime();
  static bool enable_vtime();
  static bool vtime_enabled();
  static double elapsedVTime();

  // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
  // It is MT safe, but not async-safe, as reading time zone
  // information may require a lock on some platforms.
  static char*      local_time_string(char *buf, size_t buflen);
  static struct tm* localtime_pd     (const time_t* clock, struct tm*  res);
  // Fill in buffer with current local time as an ISO-8601 string.
  // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
  // Returns buffer, or NULL if it failed.
  static char* iso8601_time(char* buffer, size_t buffer_length);
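  // Illustrative usage sketch (not part of the interface).  The buffer size
  // shown is an assumption derived from the format string documented above
  // (28 characters plus a terminating NUL):
  //
  //   char stamp[29];
  //   if (os::iso8601_time(stamp, sizeof(stamp)) != NULL) {
  //     tty->print_cr("timestamp: %s", stamp);
  //   }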

  // Interface for detecting a multiprocessor system
  static inline bool is_MP() {
#if !INCLUDE_NMT
    assert(_processor_count > 0, "invalid processor count");
    return _processor_count > 1 || AssumeMP;
#else
    // NMT needs atomic operations before this initialization.
    return true;
#endif
  }
  static julong available_memory();
  static julong physical_memory();
  static bool has_allocatable_memory_limit(julong* limit);
  static bool is_server_class_machine();

  // number of CPUs
  static int processor_count() {
    return _processor_count;
  }
  static void set_processor_count(int count) { _processor_count = count; }

  // Returns the number of CPUs this process is currently allowed to run on.
  // Note that on some OSes this can change dynamically.
  static int active_processor_count();

  // Bind processes to processors.
  //     This is a two step procedure:
  //     first you generate a distribution of processes to processors,
  //     then you bind processes according to that distribution.
  // Compute a distribution for number of processes to processors.
  //    Stores the processor id's into the distribution array argument.
  //    Returns true if it worked, false if it didn't.
  static bool distribute_processes(uint length, uint* distribution);
  // Binds the current process to a processor.
  //    Returns true if it worked, false if it didn't.
  static bool bind_to_processor(uint processor_id);

  // Give a name to the current thread.
  static void set_native_thread_name(const char *name);

  // Interface for stack banging (predetect possible stack overflow for
  // exception processing).  There are guard pages, and above that shadow
  // pages for stack overflow checking.
  static bool uses_stack_guard_pages();
  static bool allocate_stack_guard_pages();
  static void bang_stack_shadow_pages();
  static bool stack_shadow_pages_available(Thread *thread, methodHandle method);

  // OS interface to Virtual Memory

  // Return the default page size.
  static int    vm_page_size();

  // Return the page size to use for a region of memory.  The min_pages argument
  // is a hint intended to limit fragmentation; it says the returned page size
  // should be <= region_max_size / min_pages.  Because min_pages is a hint,
  // this routine may return a size larger than region_max_size / min_pages.
  //
  // The current implementation ignores min_pages if a larger page size is an
  // exact multiple of both region_min_size and region_max_size.  This allows
  // larger pages to be used when doing so would not cause fragmentation; in
  // particular, a single page can be used when region_min_size ==
  // region_max_size == a supported page size.
  static size_t page_size_for_region(size_t region_min_size,
                                     size_t region_max_size,
                                     uint min_pages);
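  // Illustrative usage sketch (not part of the interface): choosing a page
  // size for a reserved space of known, fixed size while asking for at least
  // 8 pages to limit fragmentation.  The variable names are hypothetical:
  //
  //   size_t space_bytes = 64 * M;
  //   size_t page_sz = os::page_size_for_region(space_bytes, space_bytes, 8);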
  // Return the largest page size that can be used
  static size_t max_page_size() {
    // The _page_sizes array is sorted in descending order.
    return _page_sizes[0];
  }

  // Methods for tracing page sizes returned by the above method; enabled by
  // TracePageSizes.  The region_{min,max}_size parameters should be the values
  // passed to page_size_for_region() and page_size should be the result of that
  // call.  The (optional) base and size parameters should come from the
  // ReservedSpace base() and size() methods.
  static void trace_page_sizes(const char* str, const size_t* page_sizes,
                               int count) PRODUCT_RETURN;
  static void trace_page_sizes(const char* str, const size_t region_min_size,
                               const size_t region_max_size,
                               const size_t page_size,
                               const char* base = NULL,
                               const size_t size = 0) PRODUCT_RETURN;

  static int    vm_allocation_granularity();
  static char*  reserve_memory(size_t bytes, char* addr = 0,
                               size_t alignment_hint = 0);
  static char*  reserve_memory(size_t bytes, char* addr,
                               size_t alignment_hint, MEMFLAGS flags);
  static char*  reserve_memory_aligned(size_t size, size_t alignment);
  static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
  static void   split_reserved_memory(char *base, size_t size,
                                      size_t split, bool realloc);
  static bool   commit_memory(char* addr, size_t bytes, bool executable);
  static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
                              bool executable);
  // Same as commit_memory() except that it either succeeds or calls
  // vm_exit_out_of_memory() with the specified mesg.
  static void   commit_memory_or_exit(char* addr, size_t bytes,
                                      bool executable, const char* mesg);
  static void   commit_memory_or_exit(char* addr, size_t size,
                                      size_t alignment_hint,
                                      bool executable, const char* mesg);
  static bool   uncommit_memory(char* addr, size_t bytes);
  static bool   release_memory(char* addr, size_t bytes);

  enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
  static bool   protect_memory(char* addr, size_t bytes, ProtType prot,
                               bool is_committed = true);

  static bool   guard_memory(char* addr, size_t bytes);
  static bool   unguard_memory(char* addr, size_t bytes);
  static bool   create_stack_guard_pages(char* addr, size_t bytes);
  static bool   pd_create_stack_guard_pages(char* addr, size_t bytes);
  static bool   remove_stack_guard_pages(char* addr, size_t bytes);

  static char*  map_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only = false,
                           bool allow_exec = false);
  static char*  remap_memory(int fd, const char* file_name, size_t file_offset,
                             char *addr, size_t bytes, bool read_only,
                             bool allow_exec);
  static bool   unmap_memory(char *addr, size_t bytes);
  static void   free_memory(char *addr, size_t bytes, size_t alignment_hint);
  static void   realign_memory(char *addr, size_t bytes, size_t alignment_hint);

  // NUMA-specific interface
  static bool   numa_has_static_binding();
  static bool   numa_has_group_homing();
  static void   numa_make_local(char *addr, size_t bytes, int lgrp_hint);
  static void   numa_make_global(char *addr, size_t bytes);
  static size_t numa_get_groups_num();
  static size_t numa_get_leaf_groups(int *ids, size_t size);
  static bool   numa_topology_changed();
  static int    numa_get_group_id();

  // Page manipulation
  struct page_info {
    size_t size;
    int lgrp_id;
  };
  static bool   get_page_info(char *start, page_info* info);
  static char*  scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found);

  static char*  non_memory_address_word();
  // reserve, commit and pin the entire memory region
  static char*  reserve_memory_special(size_t size, size_t alignment,
                                       char* addr, bool executable);
  static bool   release_memory_special(char* addr, size_t bytes);
  static void   large_page_init();
  static size_t large_page_size();
  static bool   can_commit_large_page_memory();
  static bool   can_execute_large_page_memory();

  // OS interface to polling page
  static address get_polling_page()             { return _polling_page; }
  static void    set_polling_page(address page) { _polling_page = page; }
  static bool    is_poll_address(address addr)  { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); }
  static void    make_polling_page_unreadable();
  static void    make_polling_page_readable();

  // Routines used to serialize the thread state without using membars
  static void    serialize_thread_states();

  // Since we write to the serialize page from every thread, we
  // want stores to be on unique cache lines whenever possible
  // in order to minimize CPU cross talk.  We pre-compute the
  // amount to shift the thread* to make this offset unique to
  // each thread.
  static int     get_serialize_page_shift_count() {
    return SerializePageShiftCount;
  }

  static void     set_serialize_page_mask(uintptr_t mask) {
    _serialize_page_mask = mask;
  }

  static unsigned int  get_serialize_page_mask() {
    return _serialize_page_mask;
  }

  static void    set_memory_serialize_page(address page);

  static address get_memory_serialize_page() {
    return (address)_mem_serialize_page;
  }

  static inline void write_memory_serialize_page(JavaThread *thread) {
    uintptr_t page_offset = ((uintptr_t)thread >>
                            get_serialize_page_shift_count()) &
                            get_serialize_page_mask();
    *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
  }

  static bool    is_memory_serialize_page(JavaThread *thread, address addr) {
    if (UseMembar) return false;
    // Previously this function calculated the exact address of this
    // thread's serialize page, and checked if the faulting address
    // was equal.  However, some platforms mask off faulting addresses
    // to the page size, so now we just check that the address is
    // within the page.  This makes the thread argument unnecessary,
    // but we retain the NULL check to preserve existing behavior.
    if (thread == NULL) return false;
    address page = (address) _mem_serialize_page;
    return addr >= page && addr < (page + os::vm_page_size());
  }
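  // Illustrative sketch (not part of the interface) of how a platform signal
  // handler typically cooperates with the serialize page: a store issued by
  // write_memory_serialize_page() faults while serialize_thread_states() has
  // the page write-protected, and the handler parks the thread until the
  // protection is dropped.  Names other than the os:: calls are hypothetical:
  //
  //   if (os::is_memory_serialize_page(java_thread, fault_addr)) {
  //     os::block_on_serialize_page_trap();
  //     return true;   // retry the faulting store
  //   }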

  static void block_on_serialize_page_trap();

  // threads

  enum ThreadType {
    vm_thread,
    cgc_thread,        // Concurrent GC thread
    pgc_thread,        // Parallel GC thread
    java_thread,
    compiler_thread,
    watcher_thread,
    os_thread
  };

  static bool create_thread(Thread* thread,
                            ThreadType thr_type,
                            size_t stack_size = 0);
  static bool create_main_thread(JavaThread* thread);
  static bool create_attached_thread(JavaThread* thread);
  static void pd_start_thread(Thread* thread);
  static void start_thread(Thread* thread);

  static void initialize_thread(Thread* thr);
  static void free_thread(OSThread* osthread);

  // thread id is 64-bit on 64-bit Linux; on Windows and Solaris it is 32-bit
  static intx current_thread_id();
  static int current_process_id();
  static int sleep(Thread* thread, jlong ms, bool interruptable);
  // Short standalone OS sleep suitable for slow path spin loop.
  // Ignores Thread.interrupt() (so keep it short).
  // ms = 0 will sleep for the least amount of time allowed by the OS.
  static void naked_short_sleep(jlong ms);
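  // Illustrative sketch (not part of the interface): a slow-path spin loop
  // that backs off with a short OS sleep.  The predicate name is hypothetical:
  //
  //   while (!resource_available()) {
  //     SpinPause();
  //     os::naked_short_sleep(1 /* ms */);
  //   }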
  static void infinite_sleep(); // never returns, use with CAUTION
  static void naked_yield () ;
  static OSReturn set_priority(Thread* thread, ThreadPriority priority);
  static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);

  static void interrupt(Thread* thread);
  static bool is_interrupted(Thread* thread, bool clear_interrupted);

  static int pd_self_suspend_thread(Thread* thread);

  static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
  static frame      fetch_frame_from_context(void* ucVoid);

  static ExtendedPC get_thread_pc(Thread *thread);
  static void breakpoint();

  static address current_stack_pointer();
  static address current_stack_base();
  static size_t current_stack_size();

  static void verify_stack_alignment() PRODUCT_RETURN;

  static int message_box(const char* title, const char* message);
  static char* do_you_want_to_debug(const char* message);

  // run cmd in a separate process and return its exit code; or -1 on failure
  static int fork_and_exec(char *cmd);

  // os::exit() is merged with vm_exit()
  // static void exit(int num);

  // Terminate the VM, but don't exit the process
  static void shutdown();

  // Terminate with an error.  Default is to generate a core file on platforms
  // that support such things.  This calls shutdown() and then aborts.
  static void abort(bool dump_core = true);

  // Die immediately, no exit hook, no abort hook, no cleanup.
  static void die();

  // File i/o operations
  static const int default_file_open_flags();
  static int open(const char *path, int oflag, int mode);
  static FILE* open(int fd, const char* mode);
  static int close(int fd);
  static jlong lseek(int fd, jlong offset, int whence);
  static char* native_path(char *path);
  static int ftruncate(int fd, jlong length);
  static int fsync(int fd);
  static int available(int fd, jlong *bytes);

  // File i/o operations

  static size_t read(int fd, void *buf, unsigned int nBytes);
  static size_t restartable_read(int fd, void *buf, unsigned int nBytes);
  static size_t write(int fd, const void *buf, unsigned int nBytes);

  // Reading directories.
  static DIR*           opendir(const char* dirname);
  static int            readdir_buf_size(const char *path);
  static struct dirent* readdir(DIR* dirp, dirent* dbuf);
  static int            closedir(DIR* dirp);

  // Dynamic library extension
  static const char*    dll_file_extension();

  static const char*    get_temp_directory();
  static const char*    get_current_directory(char *buf, size_t buflen);

  // Builds a platform-specific full library path given an ld path and lib name
  // Returns true if buffer contains full path to existing file, false otherwise
  static bool           dll_build_name(char* buffer, size_t size,
                                       const char* pathname, const char* fname);

  // Symbol lookup, find nearest function name; basically it implements
  // dladdr() for all platforms. Name of the nearest function is copied
  // to buf. Distance from its base address is optionally returned as offset.
  // If function name is not found, buf[0] is set to '\0' and offset is
  // set to -1 (if offset is non-NULL).
  static bool dll_address_to_function_name(address addr, char* buf,
                                           int buflen, int* offset);

  // Locate DLL/DSO. On success, full path of the library is copied to
  // buf, and offset is optionally set to be the distance between addr
  // and the library's base address. On failure, buf[0] is set to '\0'
  // and offset is set to -1 (if offset is non-NULL).
  static bool dll_address_to_library_name(address addr, char* buf,
                                          int buflen, int* offset);

  // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
  static bool address_is_in_vm(address addr);

  // Loads .dll/.so and, in case of error, checks whether the .dll/.so was
  // built for the same architecture as the one HotSpot is running on
  static void* dll_load(const char *name, char *ebuf, int ebuflen);

  // lookup symbol in a shared library
  static void* dll_lookup(void* handle, const char* name);

  // Unload library
  static void  dll_unload(void *lib);

  // Return the handle of this process
  static void* get_default_process_handle();

  // Check for static linked agent library
  static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
                                 size_t syms_len);

  // Find agent entry point
  static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib,
                                   const char *syms[], size_t syms_len);

  // Print out system information; these functions are called by the fatal
  // error handler.  Output format may be different on different platforms.
  static void print_os_info(outputStream* st);
  static void print_os_info_brief(outputStream* st);
  static void print_cpu_info(outputStream* st);
  static void pd_print_cpu_info(outputStream* st);
  static void print_memory_info(outputStream* st);
  static void print_dll_info(outputStream* st);
  static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
  static void print_context(outputStream* st, void* context);
  static void print_register_info(outputStream* st, void* context);
  static void print_siginfo(outputStream* st, void* siginfo);
  static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
  static void print_date_and_time(outputStream* st);

  static void print_location(outputStream* st, intptr_t x, bool verbose = false);
  static size_t lasterror(char *buf, size_t len);
  static int get_last_error();

  // Determines whether the calling process is being debugged by a user-mode debugger.
  static bool is_debugger_attached();

  // wait for a key press if PauseAtExit is set
  static void wait_for_keypress_at_exit(void);

  // The following two functions are used by the fatal error handler to trace
  // native (C) frames. They are not part of frame.hpp/frame.cpp because
  // frame.hpp/cpp assume thread is JavaThread, and also because different
  // OS/compiler may have different convention or provide different API to
  // walk C frames.
  //
  // We don't attempt to become a debugger, so we only follow frames if that
  // does not require a lookup in the unwind table, which is part of the binary
  // file but may be unsafe to read after a fatal error. So on x86, we can
  // only walk stack if %ebp is used as frame pointer; on ia64, it's not
  // possible to walk C stack without having the unwind table.
  static bool is_first_C_frame(frame *fr);
  static frame get_sender_for_C_frame(frame *fr);
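  // Illustrative sketch (not part of the interface) of how the fatal error
  // handler walks native frames with the two functions above, starting from
  // os::current_frame():
  //
  //   frame fr = os::current_frame();
  //   while (fr.pc() != NULL && !os::is_first_C_frame(&fr)) {
  //     // ... resolve fr.pc() via dll_address_to_function_name() ...
  //     fr = os::get_sender_for_C_frame(&fr);
  //   }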

  // return current frame. pc() and sp() are set to NULL on failure.
  static frame      current_frame();

  static void print_hex_dump(outputStream* st, address start, address end, int unitsize);

  // returns a string to describe the exception/signal;
  // returns NULL if exception_code is not an OS exception/signal.
  static const char* exception_name(int exception_code, char* buf, size_t buflen);

  // Returns native Java library, loads if necessary
  static void*    native_java_library();

  // Fills in path to jvm.dll/libjvm.so (used by the Disassembler)
  static void     jvm_path(char *buf, jint buflen);

  // Returns true if we are running in a headless JRE.
  static bool     is_headless_jre();

  // JNI names
  static void     print_jni_name_prefix_on(outputStream* st, int args_size);
  static void     print_jni_name_suffix_on(outputStream* st, int args_size);

  // Init os specific system properties values
  static void init_system_properties_values();

  // IO operations, non-JVM_ version.
  static int stat(const char* path, struct stat* sbuf);
  static bool dir_is_empty(const char* path);

  // IO operations on binary files
  static int create_binary_file(const char* path, bool rewrite_existing);
  static jlong current_file_offset(int fd);
  static jlong seek_to_file_offset(int fd, jlong offset);

  // Thread Local Storage
  static int   allocate_thread_local_storage();
  static void  thread_local_storage_at_put(int index, void* value);
  static void* thread_local_storage_at(int index);
  static void  free_thread_local_storage(int index);

  // Retrieve native stack frames.
  // Parameters:
  //   stack:  an array in which to store the stack pointers.
  //   size:   the size of the above array.
  //   toSkip: number of stack frames to skip at the beginning.
  // Return: number of stack frames captured.
  static int get_native_stack(address* stack, int size, int toSkip = 0);

  // General allocation (must be MT-safe)
  static void* malloc  (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
  static void* malloc  (size_t size, MEMFLAGS flags);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flag);

  static void  free    (void *memblock, MEMFLAGS flags = mtNone);
  static bool  check_heap(bool force = false);      // verify C heap integrity
  static char* strdup(const char *, MEMFLAGS flags = mtInternal);  // Like strdup
  // Like strdup, but exit VM when strdup() returns NULL
  static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal);
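  // Illustrative usage sketch (not part of the interface): allocating and
  // releasing a buffer through the MT-safe allocators above, tagged with an
  // NMT memory flag.  The size and flag shown are arbitrary examples:
  //
  //   char* buf = (char*) os::malloc(1024, mtInternal);
  //   if (buf != NULL) {
  //     // ... use buf ...
  //     os::free(buf);
  //   }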

#ifndef PRODUCT
  static julong num_mallocs;         // # of calls to malloc/realloc
  static julong alloc_bytes;         // # of bytes allocated
  static julong num_frees;           // # of calls to free
  static julong free_bytes;          // # of bytes freed
#endif

  // SocketInterface (ex HPI SocketInterface)
  static int socket(int domain, int type, int protocol);
  static int socket_close(int fd);
  static int socket_shutdown(int fd, int howto);
  static int recv(int fd, char* buf, size_t nBytes, uint flags);
  static int send(int fd, char* buf, size_t nBytes, uint flags);
  static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
  static int timeout(int fd, long timeout);
  static int listen(int fd, int count);
  static int connect(int fd, struct sockaddr* him, socklen_t len);
  static int bind(int fd, struct sockaddr* him, socklen_t len);
  static int accept(int fd, struct sockaddr* him, socklen_t* len);
  static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
                      struct sockaddr* from, socklen_t* fromlen);
  static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
  static int sendto(int fd, char* buf, size_t len, uint flags,
                    struct sockaddr* to, socklen_t tolen);
  static int socket_available(int fd, jint* pbytes);

  static int get_sock_opt(int fd, int level, int optname,
                          char* optval, socklen_t* optlen);
  static int set_sock_opt(int fd, int level, int optname,
                          const char* optval, socklen_t optlen);
  static int get_host_name(char* name, int namelen);

  static struct hostent* get_host_by_name(char* name);

  // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
  static void  signal_init();
  static void  signal_init_pd();
  static void  signal_notify(int signal_number);
  static void* signal(int signal_number, void* handler);
  static void  signal_raise(int signal_number);
  static int   signal_wait();
  static int   signal_lookup();
  static void* user_handler();
  static void  terminate_signal_thread();
  static int   sigexitnum_pd();

  // random number generation
  static long random();                    // return 32-bit pseudorandom number
  static void init_random(long initval);   // initialize random sequence

  // Structured OS Exception support
  static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

  // On Windows this will create an actual minidump; on Linux/Solaris it will simply check core dump limits
  static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize);

  // Get the default path to the core file
  // Returns the length of the string
  static int get_core_path(char* buffer, size_t bufferSize);

  // JVMTI & JVM monitoring and management support
  // The thread_cpu_time() and current_thread_cpu_time() are only
  // supported if is_thread_cpu_time_supported() returns true.
  // They are not supported on Solaris T1.

  // Thread CPU Time - return the fast estimate on a platform
  // On Solaris - call gethrvtime (fast) - user time only
  // On Linux   - fast clock_gettime where available - user+sys
  //            - otherwise: very slow /proc fs - user+sys
  // On Windows - GetThreadTimes - user+sys
  static jlong current_thread_cpu_time();
  static jlong thread_cpu_time(Thread* t);

  // Thread CPU Time with user_sys_cpu_time parameter.
  //
  // If user_sys_cpu_time is true, user+sys time is returned.
  // Otherwise, only user time is returned.
  static jlong current_thread_cpu_time(bool user_sys_cpu_time);
  static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);

  // Return a bunch of info about the timers.
  // Note that the returned info for these two functions may be different
  // on some platforms.
  static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
  static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);

  static bool is_thread_cpu_time_supported();
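  // Illustrative usage sketch (not part of the interface): callers are
  // expected to check for support before asking for thread CPU time.
  //
  //   if (os::is_thread_cpu_time_supported()) {
  //     jlong cpu_ns = os::current_thread_cpu_time(true /* user+sys */);
  //   }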

  // System loadavg support.  Returns -1 if load average cannot be obtained.
  static int loadavg(double loadavg[], int nelem);

  // Hook for os specific jvm options that we don't want to abort on seeing
  static bool obsolete_option(const JavaVMOption *option);

  // Extensions
#include "runtime/os_ext.hpp"

 public:
  class CrashProtectionCallback : public StackObj {
  public:
    virtual void call() = 0;
  };

  // Platform dependent stuff
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_posix.hpp"
# include "os_bsd.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_x86
# include "os_linux_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "os_linux_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "os_linux_zero.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "os_solaris_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "os_solaris_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "os_windows_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "os_linux_arm.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "os_linux_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "os_aix_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "os_bsd_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "os_bsd_zero.hpp"
#endif

#ifndef OS_NATIVE_THREAD_CREATION_FAILED_MSG
#define OS_NATIVE_THREAD_CREATION_FAILED_MSG "unable to create native thread: possibly out of memory or process/resource limits reached"
#endif

 public:
#ifndef PLATFORM_PRINT_NATIVE_STACK
  // No platform-specific code for printing the native stack.
  static bool platform_print_native_stack(outputStream* st, void* context,
                                          char *buf, int buf_size) {
    return false;
  }
#endif

  // debugging support (mostly used by debug.cpp but also fatal error handler)
  static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address

  static bool dont_yield();                     // when true, JVM_Yield() is nop
  static void print_statistics();

  // Thread priority helpers (implemented in OS-specific part)
  static OSReturn set_native_priority(Thread* thread, int native_prio);
  static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
  static int java_to_os_priority[CriticalPriority + 1];
  // Hint to the underlying OS that a task switch would not be good.
  // Void return because it's a hint and can fail.
  static void hint_no_preempt();
  static const char* native_thread_creation_failed_msg() {
    return OS_NATIVE_THREAD_CREATION_FAILED_MSG;
  }

  // Used at creation if requested by the diagnostic flag PauseAtStartup.
  // Causes the VM to wait until an external stimulus has been applied
  // (for Unix, that stimulus is a signal, for Windows, an external
  // ResumeThread call)
  static void pause();

  // Builds a platform dependent Agent_OnLoad_<libname> function name
  // which is used to find statically linked in agents.
  static char*  build_agent_function_name(const char *sym, const char *cname,
                                          bool is_absolute_path);

  class SuspendedThreadTaskContext {
  public:
    SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
    Thread* thread() const { return _thread; }
    void* ucontext() const { return _ucontext; }
  private:
    Thread* _thread;
    void* _ucontext;
  };

  class SuspendedThreadTask {
  public:
    SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
    virtual ~SuspendedThreadTask() {}
    void run();
    bool is_done() { return _done; }
    virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
  protected:
  private:
    void internal_do_task();
    Thread* _thread;
    bool _done;
  };

#ifndef TARGET_OS_FAMILY_windows
  // Suspend/resume support
  // Protocol:
  //
  // a thread starts in SR_RUNNING
  //
  // SR_RUNNING can go to
  //   * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
  // SR_SUSPEND_REQUEST can go to
  //   * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
  //   * SR_SUSPENDED if the stopped thread receives the signal and switches state
  // SR_SUSPENDED can go to
  //   * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
  // SR_WAKEUP_REQUEST can go to
  //   * SR_RUNNING when the stopped thread receives the signal
  //   * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
  class SuspendResume {
   public:
    enum State {
      SR_RUNNING,
      SR_SUSPEND_REQUEST,
      SR_SUSPENDED,
      SR_WAKEUP_REQUEST
    };

  private:
    volatile State _state;

  private:
    /* try to switch state from state "from" to state "to"
     * returns the state set after the method is complete
     */
    State switch_state(State from, State to);

  public:
    SuspendResume() : _state(SR_RUNNING) { }

    State state() const { return _state; }

    State request_suspend() {
      return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
    }

    State cancel_suspend() {
      return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
    }

    State suspended() {
      return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
    }

    State request_wakeup() {
      return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
    }

    State running() {
      return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
    }

    bool is_running() const {
      return _state == SR_RUNNING;
    }

    bool is_suspend_request() const {
      return _state == SR_SUSPEND_REQUEST;
    }

    bool is_suspended() const {
      return _state == SR_SUSPENDED;
    }
  };
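  // Illustrative sketch (not part of the interface) of the requester side of
  // the protocol above, as the WatcherThread might drive it; the signalling
  // step is hypothetical and platform specific:
  //
  //   SuspendResume* sr = ...;   // per-thread state
  //   if (sr->request_suspend() == SuspendResume::SR_SUSPEND_REQUEST) {
  //     // send the suspend signal, then wait (with timeout) for
  //     // sr->is_suspended(); on timeout, sr->cancel_suspend().
  //   }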
#endif


 protected:
  static long _rand_seed;                   // seed for random number generator
  static int _processor_count;              // number of processors

  static char* format_boot_path(const char* format_string,
                                const char* home,
                                int home_len,
                                char fileSep,
                                char pathSep);
  static bool set_boot_path(char fileSep, char pathSep);
  static char** split_path(const char* path, int* n);

};

// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.

extern "C" int SpinPause();

#endif // SHARE_VM_RUNTIME_OS_HPP