os_aix.cpp revision 6419:9ea2d010f47c
/*
 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS documentation, #pragma alloca must be used
// with the C++ compiler before referencing alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Add missing declarations (they should be in procinfo.h, but are missing before AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

// Excerpts from systemcfg.h definitions newer than AIX 5.3
#ifndef PV_7
# define PV_7 0x200000          // Power PC 7
# define PV_7_Compat 0x208000   // Power PC 7
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are such that codeptr_t is a *real code pointer*.
// This means that any function taking codeptr_t as an argument assumes
// a real code pointer and won't handle function descriptors (e.g. getFuncName),
// whereas functions taking an address argument also deal with function
// descriptors (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;

// typedefs for stackslots, stack pointers, pointers to op codes
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// returns true if function is a valid codepointer
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// macro to check a given stack pointer against given stack limits and to die if test fails
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// macro to check the current stack pointer against given stacklimits
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
size_t    os::Aix::_shm_default_page_size = -1;
int       os::Aix::_can_use_64K_pages = -1;
int       os::Aix::_can_use_16M_pages = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls.

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

////////////////////////////////////////////////////////////////////////////////
// environment support

bool os::getenv(const char* name, char* buf, int len) {
  const char* val = ::getenv(name);
  if (val != NULL && strlen(val) < (size_t)len) {
    strcpy(buf, val);
    return true;
  }
  if (len > 0) buf[0] = 0;  // return a null string
  return false;
}


// Return true if the process runs with special privileges, i.e. its real
// and effective uid/gid differ (e.g. for setuid/setgid binaries).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32-bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32-bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x80000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
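
// Usage sketch (illustration only): release the physical backing of a
// committed region while keeping the address range reserved; 'addr' and
// 'size' are hypothetical and must describe committed memory.
//
//   if (!my_disclaim64(addr, size)) {
//     // paging space was not released; treat as a soft failure
//   }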

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}
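
// Usage sketch (illustration only): probe the page size backing a stack
// variable, the same trick query_multipage_support() below uses to find
// the pthread stack page size.
//
//   int dummy = 0;
//   const size_t stack_ps = os::Aix::query_pagesize(&dummy);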

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // get the number of online (logical) cpus instead of configured
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // retrieve total physical storage
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Retrieve information about multipage size support. Will initialize
// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
// Aix::_can_use_16M_pages.
// Must be called before calling os::large_page_init().
void os::Aix::query_multipage_support() {

  guarantee(_page_size == -1 &&
            _stack_page_size == -1 &&
            _can_use_64K_pages == -1 &&
            _can_use_16M_pages == -1 &&
            g_multipage_error == -1,
            "do not call twice");

  _page_size = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(_page_size == SIZE_4K, "surprise!");


  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is influenced either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  size_t data_page_size = SIZE_4K;
  {
    void* p = ::malloc(SIZE_16M);
    guarantee(p != NULL, "malloc failed");
    data_page_size = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // query default shm page size (LDR_CNTRL SHMPSIZE)
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    _shm_default_page_size = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as the
  // primordial thread (because the primordial thread's stack may have a different
  // page size than pthread thread stacks). Running a VM on the primordial thread
  // won't work for a number of reasons, so we may just as well guarantee it here.
  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");

  // query stack page size
  {
    int dummy = 0;
    _stack_page_size = os::Aix::query_pagesize(&dummy);
    // everything else would surprise me and should be looked into
    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
    // also, just for completeness: pthread stacks are allocated from C heap, so
    // stack page size should be the same as data page size
    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
  }

  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
  // for system V shm.
  if (Aix::extshm()) {
    if (Verbose) {
      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
                      "Please make sure EXTSHM is OFF for large page support.\n");
    }
    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
    _can_use_64K_pages = _can_use_16M_pages = 0;
    goto query_multipage_support_end;
  }

  // now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      if (Verbose) {
        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
        fprintf(stderr, "disabling multipage support.\n");
      }
      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
      _can_use_64K_pages = _can_use_16M_pages = 0;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    if (Verbose) {
      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
      for (int i = 0; i < num_psizes; i ++) {
        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
      }
      fprintf(stderr, " .\n");
    }

    // Can we use 64K, 16M pages?
    _can_use_64K_pages = 0;
    _can_use_16M_pages = 0;
    for (int i = 0; i < num_psizes; i ++) {
      if (sizes[i] == SIZE_64K) {
        _can_use_64K_pages = 1;
      } else if (sizes[i] == SIZE_16M) {
        _can_use_16M_pages = 1;
      }
    }

    if (!_can_use_64K_pages) {
      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
    }

    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
    // there must be an actual 16M page pool, and we must run with enough rights.
    if (_can_use_16M_pages) {
      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee(shmid != -1, "shmget failed");
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = SIZE_16M;
      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
      const int en = errno;
      ::shmctl(shmid, IPC_RMID, NULL);
      if (!can_set_pagesize) {
        if (Verbose) {
          fprintf(stderr, "Failed to allocate even one measly 16M page. shmctl failed with %d (%s).\n"
                          "Will deactivate 16M support.\n", en, strerror(en));
        }
        _can_use_16M_pages = 0;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  guarantee(_page_size != -1 &&
            _stack_page_size != -1 &&
            _can_use_64K_pages != -1 &&
            _can_use_16M_pages != -1, "Page sizes not properly initialized");

  if (_can_use_64K_pages) {
    g_multipage_error = 0;
  }

  if (Verbose) {
    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
    fprintf(stderr, "Can use 64K pages dynamically with shared memory: %s\n", (_can_use_64K_pages ? "yes" :"no"));
    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" :"no"));
    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
  }

} // end os::Aix::query_multipage_support()

// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
    return true;
  else
    return false;
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
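
// Usage sketch (illustration only): all meminfo_t fields are byte counts
// (the perfstat 4 KB page counts were scaled above), so a caller reporting
// megabytes would divide by M:
//
//   os::Aix::meminfo_t mi;
//   if (os::Aix::get_meminfo(&mi)) {
//     fprintf(stderr, "physical free: %llu MB\n", mi.real_free / M);
//   }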

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset (&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy (pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} //end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially noticeable on
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let pthread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

// For now, we say that AIX does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).
1073
1074bool os::supports_vtime() { return false; }
1075bool os::enable_vtime()   { return false; }
1076bool os::vtime_enabled()  { return false; }
1077double os::elapsedVTime() {
1078  // better than nothing, but not much
1079  return elapsedTime();
1080}
1081
1082jlong os::javaTimeMillis() {
1083  timeval time;
1084  int status = gettimeofday(&time, NULL);
1085  assert(status != -1, "aix error at gettimeofday()");
1086  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1087}
1088
1089// We need to manually declare mread_real_time,
1090// because IBM didn't provide a prototype in time.h.
1091// (they probably only ever tested in C, not C++)
1092extern "C"
1093int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
1094
1095jlong os::javaTimeNanos() {
1096  if (os::Aix::on_pase()) {
1097    Unimplemented();
1098    return 0;
1099  }
1100  else {
    // On AIX, use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
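
// Worked example (illustration only): after conversion, tb_high holds whole
// seconds and tb_low the nanosecond remainder, so tb_high = 2 and
// tb_low = 500000000 yield 2 * 1000000000 + 500000000 = 2500000000 ns.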

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}


char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// Unused on Aix for now.
void os::set_error_file(const char *logfile) {}


// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {

  if (errno == 0)  return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}
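
// Usage sketch (illustration only): capture the errno text right after a
// failing call, before anything else can clobber errno; the path is
// hypothetical.
//
//   if (::open("/no/such/file", O_RDONLY) == -1) {
//     char msg[128];
//     os::lasterror(msg, sizeof(msg));
//   }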

intx os::current_thread_id() { return (intx)pthread_self(); }
int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
      return true;
    } else {
      return false;
    }
  } else {
    lib = LoadedLibraries::find_for_data_address(addr);
    if (lib) {
      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // it's a real code pointer
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // it's a function descriptor
        return code_entry;
      }
    }
  }
  return NULL;
}
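
// Background sketch (illustration only): on AIX, a C function pointer such
// as &os::breakpoint points at a descriptor in the data segment whose first
// slot is the code entry point, conceptually:
//
//   struct aix_fd_sketch { address entry; address toc; address env; };
//
// so resolving means loading the first slot and checking that it lands in a
// text segment. The layout above is a sketch; the VM uses the
// FunctionDescriptor class from the AIX porting layer instead.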

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      sprintf(p_name, "%.*s", (int) namelen, lib->get_shortname());
    }
    return 0;
  }

  if (Verbose) {
    fprintf(stderr, "pc outside any module\n");
  }

  return -1;

}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
// built for the same architecture as HotSpot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    if (ebuf && ebuflen > 0) {
      ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    }
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependents.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

// Glibc-2.0 libdl is not MT safe. If you are building with any glibc,
// chances are you might want to run the generated bits against glibc-2.0
// libdl.so, so always use locking for any version of glibc.
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}
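
// Usage sketch (illustration only): load a library and look up a symbol;
// dlsym() is serialized by dl_mutex above. "libfoo.so" and "foo" are
// hypothetical names.
//
//   char ebuf[1024];
//   void* handle = os::dll_load("libfoo.so", ebuf, sizeof(ebuf));
//   if (handle != NULL) {
//     void* sym = os::dll_lookup(handle, "foo");
//   }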

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print(name.sysname); st->print(" ");
  st->print(name.nodename); st->print(" ");
  st->print(name.release); st->print(" ");
  st->print(name.version); st->print(" ");
  st->print(name.machine);
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}

void os::print_memory_info(outputStream* st) {

  st->print_cr("Memory:");

  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
  st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" :"no"));
  st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" :"no"));
  if (g_multipage_error != 0) {
    st->print_cr("  multipage error: %d", g_multipage_error);
  }

  // print out LDR_CNTRL because it affects the default page sizes
  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");

  const char* const extshm = ::getenv("EXTSHM");
  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");

  // Call os::Aix::get_meminfo() to retrieve memory statistics.
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    char buffer[256];
    if (os::Aix::on_aix()) {
      jio_snprintf(buffer, sizeof(buffer),
                   "  physical total : %llu\n"
                   "  physical free  : %llu\n"
                   "  swap total     : %llu\n"
                   "  swap free      : %llu\n",
                   mi.real_total,
                   mi.real_free,
                   mi.pgsp_total,
                   mi.pgsp_free);
    } else {
      Unimplemented();
    }
    st->print_raw(buffer);
  } else {
    st->print_cr("  (no more information available)");
  }
}

void os::pd_print_cpu_info(outputStream* st) {
  // cpu
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::cpu_features());
  st->cr();
}

void os::print_siginfo(outputStream* st, void* siginfo) {
  // Use common posix version.
  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
  st->cr();
}


static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  print_signal_handler(st, SIGDANGER, buf, buflen);
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so or libjvm_g.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = realpath((char *)dlinfo.dli_fname, buf);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  strcpy(saved_jvm_path, buf);
}

void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

////////////////////////////////////////////////////////////////////////////////
// sun.misc.Signal support

static volatile jint sigint_count = 0;

static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}

void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}

void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies.  Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
1696  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
1697  // On AIX, PASE there was a case where a SIGSEGV happened, followed
1698  // by a SIGILL, which was blocked due to the signal mask. The process
1699  // just hung forever. Better to crash from a secondary signal than to hang.
1700  sigdelset(&(sigAct.sa_mask), SIGSEGV);
1701  sigdelset(&(sigAct.sa_mask), SIGBUS);
1702  sigdelset(&(sigAct.sa_mask), SIGILL);
1703  sigdelset(&(sigAct.sa_mask), SIGFPE);
1704  sigdelset(&(sigAct.sa_mask), SIGTRAP);
1705
1706  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1707
1708  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1709
1710  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1711    // -1 means registration failed
1712    return (void *)-1;
1713  }
1714
1715  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1716}
1717
1718void os::signal_raise(int signal_number) {
1719  ::raise(signal_number);
1720}
1721
1722//
1723// The following code is moved from os.cpp for making this
1724// code platform specific, which it is by its very nature.
1725//
1726
1727// Will be modified when max signal is changed to be dynamic
1728int os::sigexitnum_pd() {
1729  return NSIG;
1730}
1731
1732// a counter for each possible signal value
1733static volatile jint pending_signals[NSIG+1] = { 0 };
1734
1735  // Linux (POSIX) style hand-shaking semaphore.
1736static sem_t sig_sem;
1737
1738void os::signal_init_pd() {
1739  // Initialize signal structures
1740  ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1741
1742  // Initialize signal semaphore
1743  int rc = ::sem_init(&sig_sem, 0, 0);
1744  guarantee(rc != -1, "sem_init failed");
1745}
1746
1747void os::signal_notify(int sig) {
1748  Atomic::inc(&pending_signals[sig]);
1749  ::sem_post(&sig_sem);
1750}
1751
1752static int check_pending_signals(bool wait) {
1753  Atomic::store(0, &sigint_count);
1754  for (;;) {
1755    for (int i = 0; i < NSIG + 1; i++) {
1756      jint n = pending_signals[i];
1757      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1758        return i;
1759      }
1760    }
1761    if (!wait) {
1762      return -1;
1763    }
1764    JavaThread *thread = JavaThread::current();
1765    ThreadBlockInVM tbivm(thread);
1766
1767    bool threadIsSuspended;
1768    do {
1769      thread->set_suspend_equivalent();
1770      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1771
1772      ::sem_wait(&sig_sem);
1773
1774      // were we externally suspended while we were waiting?
1775      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1776      if (threadIsSuspended) {
1777        //
1778        // The semaphore has been incremented, but while we were waiting
1779        // another thread suspended us. We don't want to continue running
1780        // while suspended because that would surprise the thread that
1781        // suspended us.
1782        //
1783        ::sem_post(&sig_sem);
1784
1785        thread->java_suspend_self();
1786      }
1787    } while (threadIsSuspended);
1788  }
1789}
1790
1791int os::signal_lookup() {
1792  return check_pending_signals(false);
1793}
1794
1795int os::signal_wait() {
1796  return check_pending_signals(true);
1797}
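// Illustrative usage sketch (not part of the original sources): a signal
// dispatcher thread would block in signal_wait() and hand the result to the
// Java-level sun.misc.Signal machinery:
//
//   int sig = os::signal_wait();   // blocks on sig_sem
//   // ... dispatch sig to the registered handlers ...
//
// Note that check_pending_signals() consumes exactly one pending signal per
// call: the Atomic::cmpxchg only decrements pending_signals[i] if no other
// thread raced us to it.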
1798
1799////////////////////////////////////////////////////////////////////////////////
1800// Virtual Memory
1801
1802// AddrRange describes an immutable address range
1803//
1804// This is a helper class for the 'shared memory bookkeeping' below.
1805class AddrRange {
1806  friend class ShmBkBlock;
1807
1808  char* _start;
1809  size_t _size;
1810
1811public:
1812
1813  AddrRange(char* start, size_t size)
1814    : _start(start), _size(size)
1815  {}
1816
1817  AddrRange(const AddrRange& r)
1818    : _start(r.start()), _size(r.size())
1819  {}
1820
1821  char* start() const { return _start; }
1822  size_t size() const { return _size; }
1823  char* end() const { return _start + _size; }
1824  bool is_empty() const { return _size == 0 ? true : false; }
1825
1826  static AddrRange empty_range() { return AddrRange(NULL, 0); }
1827
1828  bool contains(const char* p) const {
1829    return start() <= p && end() > p;
1830  }
1831
1832  bool contains(const AddrRange& range) const {
1833    return start() <= range.start() && end() >= range.end();
1834  }
1835
1836  bool intersects(const AddrRange& range) const {
1837    return (range.start() <= start() && range.end() > start()) ||
1838           (range.start() < end() && range.end() >= end()) ||
1839           contains(range);
1840  }
1841
1842  bool is_same_range(const AddrRange& range) const {
1843    return start() == range.start() && size() == range.size();
1844  }
1845
1846  // return the closest inside range consisting of whole pages
1847  AddrRange find_closest_aligned_range(size_t pagesize) const {
1848    if (pagesize == 0 || is_empty()) {
1849      return empty_range();
1850    }
1851    char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
1852    char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
1853    if (from > to) {
1854      return empty_range();
1855    }
1856    return AddrRange(from, to - from);
1857  }
1858};
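// Worked example (illustrative only): for a range starting at address 0x1003
// with size 0x3000 (so end() == 0x4003) and a page size of 0x1000,
// find_closest_aligned_range() returns the largest contained run of whole
// pages, [0x2000 .. 0x4000):
//
//   AddrRange r((char*) 0x1003, 0x3000);
//   AddrRange a = r.find_closest_aligned_range(0x1000);
//   // a.start() == (char*) 0x2000, a.size() == 0x2000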
1859
1860////////////////////////////////////////////////////////////////////////////
1861// shared memory bookkeeping
1862//
1863// the os::reserve_memory() API and friends hand out different kind of memory, depending
1864// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
1865//
1866// But these memory types have to be treated differently. For example, to uncommit
1867// mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
1868// disclaim64() is needed.
1869//
1870// Therefore we need to keep track of the allocated memory segments and their
1871// properties.
1872
1873// ShmBkBlock: base class for all blocks in the shared memory bookkeeping
1874class ShmBkBlock : public CHeapObj<mtInternal> {
1875
1876  ShmBkBlock* _next;
1877
1878protected:
1879
1880  AddrRange _range;
1881  const size_t _pagesize;
1882  const bool _pinned;
1883
1884public:
1885
1886  ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
1887    : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
1888
1889    assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
1890    assert(!_range.is_empty(), "invalid range");
1891  }
1892
1893  virtual void print(outputStream* st) const {
1894    st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
1895              _range.start(), _range.end(), (unsigned long long) _range.size(),
1896              (int) (_range.size() / _pagesize), describe_pagesize(_pagesize),
1897              _pinned ? "pinned" : "");
1898  }
1899
1900  enum Type { MMAP, SHMAT };
1901  virtual Type getType() = 0;
1902
1903  char* base() const { return _range.start(); }
1904  size_t size() const { return _range.size(); }
1905
1906  void setAddrRange(AddrRange range) {
1907    _range = range;
1908  }
1909
1910  bool containsAddress(const char* p) const {
1911    return _range.contains(p);
1912  }
1913
1914  bool containsRange(const char* p, size_t size) const {
1915    return _range.contains(AddrRange((char*)p, size));
1916  }
1917
1918  bool isSameRange(const char* p, size_t size) const {
1919    return _range.is_same_range(AddrRange((char*)p, size));
1920  }
1921
1922  virtual bool disclaim(char* p, size_t size) = 0;
1923  virtual bool release() = 0;
1924
1925  // blocks live in a list.
1926  ShmBkBlock* next() const { return _next; }
1927  void set_next(ShmBkBlock* blk) { _next = blk; }
1928
1929}; // end: ShmBkBlock
1930
1931
1932  // ShmBkMappedBlock: describes a block allocated with mmap()
1933class ShmBkMappedBlock : public ShmBkBlock {
1934public:
1935
1936  ShmBkMappedBlock(AddrRange range)
1937    : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
1938
1939  void print(outputStream* st) const {
1940    ShmBkBlock::print(st);
1941    st->print_cr(" - mmap'ed");
1942  }
1943
1944  Type getType() {
1945    return MMAP;
1946  }
1947
1948  bool disclaim(char* p, size_t size) {
1949
1950    AddrRange r(p, size);
1951
1952    guarantee(_range.contains(r), "invalid disclaim");
1953
1954    // only disclaim whole ranges.
1955    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
1956    if (r2.is_empty()) {
1957      return true;
1958    }
1959
1960    const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
1961
1962    if (rc != 0) {
1963      warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
1964    }
1965
1966    return rc == 0 ? true : false;
1967  }
1968
1969  bool release() {
1970    // mmap'ed blocks are released using munmap
1971    if (::munmap(_range.start(), _range.size()) != 0) {
1972      warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
1973      return false;
1974    }
1975    return true;
1976  }
1977}; // end: ShmBkMappedBlock
1978
1979  // ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
1980class ShmBkShmatedBlock : public ShmBkBlock {
1981public:
1982
1983  ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
1984    : ShmBkBlock(range, pagesize, pinned) {}
1985
1986  void print(outputStream* st) const {
1987    ShmBkBlock::print(st);
1988    st->print_cr(" - shmat'ed");
1989  }
1990
1991  Type getType() {
1992    return SHMAT;
1993  }
1994
1995  bool disclaim(char* p, size_t size) {
1996
1997    AddrRange r(p, size);
1998
1999    if (_pinned) {
2000      return true;
2001    }
2002
2003    // shmat'ed blocks are disclaimed using disclaim64
2004    guarantee(_range.contains(r), "invalid disclaim");
2005
2006    // only disclaim whole ranges.
2007    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
2008    if (r2.is_empty()) {
2009      return true;
2010    }
2011
2012    const bool rc = my_disclaim64(r2.start(), r2.size());
2013
2014    if (Verbose && !rc) {
2015      warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
2016    }
2017
2018    return rc;
2019  }
2020
2021  bool release() {
2022    bool rc = false;
2023    if (::shmdt(_range.start()) != 0) {
2024      warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
2025    } else {
2026      rc = true;
2027    }
2028    return rc;
2029  }
2030
2031}; // end: ShmBkShmatedBlock
2032
2033static ShmBkBlock* g_shmbk_list = NULL;
2034static volatile jint g_shmbk_table_lock = 0;
2035
2036// keep some usage statistics
2037static struct {
2038  int nodes;    // number of nodes in list
2039  size_t bytes; // reserved - not committed - bytes.
2040  int reserves; // how often reserve was called
2041  int lookups;  // how often a lookup was made
2042} g_shmbk_stats = { 0, 0, 0, 0 };
2043
2044// add information about a shared memory segment to the bookkeeping
2045static void shmbk_register(ShmBkBlock* p_block) {
2046  guarantee(p_block, "logic error");
2047  p_block->set_next(g_shmbk_list);
2048  g_shmbk_list = p_block;
2049  g_shmbk_stats.reserves ++;
2050  g_shmbk_stats.bytes += p_block->size();
2051  g_shmbk_stats.nodes ++;
2052}
2053
2054// remove information about a shared memory segment by its starting address
2055static void shmbk_unregister(ShmBkBlock* p_block) {
2056  ShmBkBlock* p = g_shmbk_list;
2057  ShmBkBlock* prev = NULL;
2058  while (p) {
2059    if (p == p_block) {
2060      if (prev) {
2061        prev->set_next(p->next());
2062      } else {
2063        g_shmbk_list = p->next();
2064      }
2065      g_shmbk_stats.nodes --;
2066      g_shmbk_stats.bytes -= p->size();
2067      return;
2068    }
2069    prev = p;
2070    p = p->next();
2071  }
2072  assert(false, "should not happen");
2073}
2074
2075// given a pointer, return shared memory bookkeeping record for the segment it points into
2076// using the returned block info must happen under lock protection
2077static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
2078  g_shmbk_stats.lookups ++;
2079  ShmBkBlock* p = g_shmbk_list;
2080  while (p) {
2081    if (p->containsAddress(addr)) {
2082      return p;
2083    }
2084    p = p->next();
2085  }
2086  return NULL;
2087}
2088
2089// dump all information about all memory segments allocated with os::reserve_memory()
2090void shmbk_dump_info() {
2091  tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
2092    "total reserves: %d total lookups: %d)",
2093    g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
2094  const ShmBkBlock* p = g_shmbk_list;
2095  int i = 0;
2096  while (p) {
2097    p->print(tty);
2098    p = p->next();
2099    i ++;
2100  }
2101}
2102
2103#define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
2104#define UNLOCK_SHMBK   }
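// Usage pattern for the macros above - the braces they expand to open and
// close a ThreadCritical scope (sketch, mirroring os::pd_uncommit_memory below):
//
//   LOCK_SHMBK
//     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
//     // ... block may only be used while the lock is held ...
//   UNLOCK_SHMBK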
2105
2106// End: shared memory bookkeeping
2107////////////////////////////////////////////////////////////////////////////////////////////////////
2108
2109int os::vm_page_size() {
2110  // Seems redundant as all get out
2111  assert(os::Aix::page_size() != -1, "must call os::init");
2112  return os::Aix::page_size();
2113}
2114
2115// Aix allocates memory by pages.
2116int os::vm_allocation_granularity() {
2117  assert(os::Aix::page_size() != -1, "must call os::init");
2118  return os::Aix::page_size();
2119}
2120
2121int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
2122
2123  // Commit is a noop. There is no explicit commit
2124  // needed on AIX. Memory is committed when touched.
2125  //
2126  // Debug : check address range for validity
2127#ifdef ASSERT
2128  LOCK_SHMBK
2129    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2130    if (!block) {
2131      fprintf(stderr, "invalid pointer: 0x%p\n", addr);
2132      shmbk_dump_info();
2133      assert(false, "invalid pointer");
2134      return false;
2135    } else if (!block->containsRange(addr, size)) {
2136      fprintf(stderr, "invalid range: 0x%p .. 0x%p\n", addr, addr + size);
2137      shmbk_dump_info();
2138      assert(false, "invalid range");
2139      return false;
2140    }
2141  UNLOCK_SHMBK
2142#endif // ASSERT
2143
2144  return 0;
2145}
2146
2147bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2148  return os::Aix::commit_memory_impl(addr, size, exec) == 0;
2149}
2150
2151void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2152                                  const char* mesg) {
2153  assert(mesg != NULL, "mesg must be specified");
2154  os::Aix::commit_memory_impl(addr, size, exec);
2155}
2156
2157int os::Aix::commit_memory_impl(char* addr, size_t size,
2158                                size_t alignment_hint, bool exec) {
2159  return os::Aix::commit_memory_impl(addr, size, exec);
2160}
2161
2162bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2163                          bool exec) {
2164  return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2165}
2166
2167void os::pd_commit_memory_or_exit(char* addr, size_t size,
2168                                  size_t alignment_hint, bool exec,
2169                                  const char* mesg) {
2170  os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
2171}
2172
2173bool os::pd_uncommit_memory(char* addr, size_t size) {
2174
2175  // Delegate to ShmBkBlock class which knows how to uncommit its memory.
2176
2177  bool rc = false;
2178  LOCK_SHMBK
2179    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2180    if (!block) {
2181      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2182      shmbk_dump_info();
2183      assert(false, "invalid pointer");
2184      return false;
2185    } else if (!block->containsRange(addr, size)) {
2186      fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
2187      shmbk_dump_info();
2188      assert(false, "invalid range");
2189      return false;
2190    }
2191    rc = block->disclaim(addr, size);
2192  UNLOCK_SHMBK
2193
2194  if (Verbose && !rc) {
2195    warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
2196  }
2197  return rc;
2198}
2199
2200bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2201  return os::guard_memory(addr, size);
2202}
2203
2204bool os::remove_stack_guard_pages(char* addr, size_t size) {
2205  return os::unguard_memory(addr, size);
2206}
2207
2208void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2209}
2210
2211void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2212}
2213
2214void os::numa_make_global(char *addr, size_t bytes) {
2215}
2216
2217void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2218}
2219
2220bool os::numa_topology_changed() {
2221  return false;
2222}
2223
2224size_t os::numa_get_groups_num() {
2225  return 1;
2226}
2227
2228int os::numa_get_group_id() {
2229  return 0;
2230}
2231
2232size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2233  if (size > 0) {
2234    ids[0] = 0;
2235    return 1;
2236  }
2237  return 0;
2238}
2239
2240bool os::get_page_info(char *start, page_info* info) {
2241  return false;
2242}
2243
2244char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2245  return end;
2246}
2247
2248// Flags for reserve_shmatted_memory:
2249#define RESSHM_WISHADDR_OR_FAIL                     1
2250#define RESSHM_TRY_16M_PAGES                        2
2251#define RESSHM_16M_PAGES_OR_FAIL                    4
2252
2253// Result of reserve_shmatted_memory:
2254struct shmatted_memory_info_t {
2255  char* addr;
2256  size_t pagesize;
2257  bool pinned;
2258};
2259
2260// Reserve a section of shmatted memory.
2261// params:
2262// bytes [in]: size of memory, in bytes
2263// requested_addr [in]: wish address.
2264//                      NULL = no wish.
2265//                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
2266//                      be obtained, function will fail. Otherwise wish address is treated as hint and
2267//                      another pointer is returned.
2268// flags [in]:          some flags. Valid flags are:
2269//                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
2270//                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
2271//                          (requires UseLargePages and Use16MPages)
2272//                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
2273//                          Otherwise any other page size will do.
2274// p_info [out] :       holds information about the created shared memory segment.
2275static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
2276
2277  assert(p_info, "parameter error");
2278
2279  // init output struct.
2280  p_info->addr = NULL;
2281
2282  // We should not be here at all for EXTSHM=ON.
2283  if (os::Aix::extshm()) {
2284    ShouldNotReachHere();
2285  }
2286
2287  // extract flags. sanity checks.
2288  const bool wishaddr_or_fail =
2289    flags & RESSHM_WISHADDR_OR_FAIL;
2290  const bool try_16M_pages =
2291    flags & RESSHM_TRY_16M_PAGES;
2292  const bool f16M_pages_or_fail =
2293    flags & RESSHM_16M_PAGES_OR_FAIL;
2294
2295  // first check: if a wish address is given but not aligned to the 256M segment boundary,
2296  // shmat will fail anyway, so save some cycles by failing right away
2297  if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
2298    if (wishaddr_or_fail) {
2299      return false;
2300    } else {
2301      requested_addr = NULL;
2302    }
2303  }
2304
2305  char* addr = NULL;
2306
2307  // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
2308  // pagesize dynamically.
2309  const size_t size = align_size_up(bytes, SIZE_16M);
2310
2311  // reserve the shared segment
2312  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2313  if (shmid == -1) {
2314    warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
2315    return false;
2316  }
2317
2318  // Important note:
2319  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2320  // We must right after attaching it remove it from the system. System V shm segments are global and
2321  // survive the process.
2322  // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
2323
2324  // try forcing the page size
2325  size_t pagesize = -1; // unknown so far
2326
2327  if (UseLargePages) {
2328
2329    struct shmid_ds shmbuf;
2330    memset(&shmbuf, 0, sizeof(shmbuf));
2331
2332    // First, try to take from 16M page pool if...
2333    if (os::Aix::can_use_16M_pages()  // we can ...
2334        && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
2335        && try_16M_pages) {           // caller wants us to.
2336      shmbuf.shm_pagesize = SIZE_16M;
2337      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2338        pagesize = SIZE_16M;
2339      } else {
2340        warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
2341                (int) (size / SIZE_16M), errno);
2342        if (f16M_pages_or_fail) {
2343          goto cleanup_shm;
2344        }
2345      }
2346    }
2347
2348    // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
2349    // because the 64K page pool may also be exhausted.
2350    if (pagesize == -1) {
2351      shmbuf.shm_pagesize = SIZE_64K;
2352      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2353        pagesize = SIZE_64K;
2354      } else {
2355        warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
2356                (int) (size / SIZE_64K), errno);
2357        // here I give up. leave page_size -1 - later, after attaching, we will query the
2358        // real page size of the attached memory. (in theory, it may be something different
2359        // from 4K if LDR_CNTRL SHM_PSIZE is set)
2360      }
2361    }
2362  }
2363
2364  // sanity point
2365  assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
2366
2367  // Now attach the shared segment.
2368  addr = (char*) shmat(shmid, requested_addr, 0);
2369  if (addr == (char*)-1) {
2370    // How to handle attach failure:
2371    // If it failed for a specific wish address, tolerate this: in that case, if wish address was
2372    // mandatory, fail, if not, retry anywhere.
2373    // If it failed for any other reason, treat that as fatal error.
2374    addr = NULL;
2375    if (requested_addr) {
2376      if (wishaddr_or_fail) {
2377        goto cleanup_shm;
2378      } else {
2379        addr = (char*) shmat(shmid, NULL, 0);
2380        if (addr == (char*)-1) { // fatal
2381          addr = NULL;
2382          warning("shmat failed (errno: %d)", errno);
2383          goto cleanup_shm;
2384        }
2385      }
2386    } else { // fatal
2387      addr = NULL;
2388      warning("shmat failed (errno: %d)", errno);
2389      goto cleanup_shm;
2390    }
2391  }
2392
2393  // sanity point
2394  assert(addr && addr != (char*) -1, "wrong address");
2395
2396  // after successful Attach remove the segment - right away.
2397  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2398    warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2399    guarantee(false, "failed to remove shared memory segment!");
2400  }
2401  shmid = -1;
2402
2403  // query the real page size. In case setting the page size did not work (see above), the system
2404  // may have given us something other than 4K (LDR_CNTRL)
2405  {
2406    const size_t real_pagesize = os::Aix::query_pagesize(addr);
2407    if (pagesize != -1) {
2408      assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
2409    } else {
2410      pagesize = real_pagesize;
2411    }
2412  }
2413
2414  // Now register the reserved block with internal book keeping.
2415  LOCK_SHMBK
2416    const bool pinned = pagesize >= SIZE_16M ? true : false;
2417    ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
2418    assert(p_block, "");
2419    shmbk_register(p_block);
2420  UNLOCK_SHMBK
2421
2422cleanup_shm:
2423
2424  // if we have not done so yet, remove the shared memory segment. This is very important.
2425  if (shmid != -1) {
2426    if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2427      warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2428      guarantee(false, "failed to remove shared memory segment!");
2429    }
2430    shmid = -1;
2431  }
2432
2433  // trace
2434  if (Verbose && !addr) {
2435    if (requested_addr != NULL) {
2436      warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
2437    } else {
2438      warning("failed to shm-allocate 0x%llX bytes at any address.", size);
2439    }
2440  }
2441
2442  // hand info to caller
2443  if (addr) {
2444    p_info->addr = addr;
2445    p_info->pagesize = pagesize;
2446    p_info->pinned = pagesize == SIZE_16M ? true : false;
2447  }
2448
2449  // sanity test:
2450  if (requested_addr && addr && wishaddr_or_fail) {
2451    guarantee(addr == requested_addr, "shmat error");
2452  }
2453
2454  // just one more test to really make sure we have no dangling shm segments.
2455  guarantee(shmid == -1, "dangling shm segments");
2456
2457  return addr ? true : false;
2458
2459} // end: reserve_shmatted_memory
2460
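// Illustrative call (not part of the original sources): reserve 64M anywhere,
// preferring - but not requiring - 16M pages:
//
//   shmatted_memory_info_t info;
//   if (reserve_shmatted_memory(4 * SIZE_16M, NULL, RESSHM_TRY_16M_PAGES, &info)) {
//     // info.addr, info.pagesize and info.pinned describe the new segment.
//   }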
2461// Reserve memory using mmap. Behaves the same as reserve_shmatted_memory():
2462// will return NULL in case of an error.
2463static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
2464
2465  // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2466  if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
2467    warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
2468    return NULL;
2469  }
2470
2471  const size_t size = align_size_up(bytes, SIZE_4K);
2472
2473  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2474  // msync(MS_INVALIDATE) (see os::uncommit_memory)
2475  int flags = MAP_ANONYMOUS | MAP_SHARED;
2476
2477  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2478  // it means if wishaddress is given but MAP_FIXED is not set.
2479  //
2480  // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
2481  // clobbers the address range, which is probably not what the caller wants. That's
2482  // why I assert here (again) that the SPEC1170 compat mode is off.
2483  // If we want to be able to run under SPEC1170, we have to do some porting and
2484  // testing.
2485  if (requested_addr != NULL) {
2486    assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
2487    flags |= MAP_FIXED;
2488  }
2489
2490  char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2491
2492  if (addr == MAP_FAILED) {
2493    // attach failed: tolerate for specific wish addresses. Not being able to attach
2494    // anywhere is a fatal error.
2495    if (requested_addr == NULL) {
2496      // It's ok to fail here if the machine does not have enough memory.
2497      warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
2498    }
2499    addr = NULL;
2500    goto cleanup_mmap;
2501  }
2502
2503  // If we did request a specific address and that address was not available, fail.
2504  if (addr && requested_addr) {
2505    guarantee(addr == requested_addr, "unexpected");
2506  }
2507
2508  // register this mmap'ed segment with book keeping
2509  LOCK_SHMBK
2510    ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
2511    assert(p_block, "");
2512    shmbk_register(p_block);
2513  UNLOCK_SHMBK
2514
2515cleanup_mmap:
2516
2517  // trace
2518  if (Verbose) {
2519    if (addr) {
2520      fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
2521    }
2522    else {
2523      if (requested_addr != NULL) {
2524        warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
2525      } else {
2526        warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
2527      }
2528    }
2529  }
2530
2531  return addr;
2532
2533} // end: reserve_mmaped_memory
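// Illustrative call (not part of the original sources; the wish address is a
// made-up, page-aligned value): mmap-reserve memory at a fixed address:
//
//   char* p = reserve_mmaped_memory(SIZE_64K, (char*) 0x0A0000000000ULL);
//   // p is either the wish address or NULL.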
2534
2535// Reserves and attaches a shared memory segment.
2536// Will assert if a wish address is given and could not be obtained.
2537char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2538  return os::attempt_reserve_memory_at(bytes, requested_addr);
2539}
2540
2541bool os::pd_release_memory(char* addr, size_t size) {
2542
2543  // delegate to ShmBkBlock class which knows how to uncommit its memory.
2544
2545  bool rc = false;
2546  LOCK_SHMBK
2547    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2548    if (!block) {
2549      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2550      shmbk_dump_info();
2551      assert(false, "invalid pointer");
2552      return false;
2553    }
2554    else if (!block->isSameRange(addr, size)) {
2555      if (block->getType() == ShmBkBlock::MMAP) {
2556        // Release only the same range, or a piece at the beginning or at the end of a range.
2557        if (block->base() == addr && size < block->size()) {
2558          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
2559          assert(b, "");
2560          shmbk_register(b);
2561          block->setAddrRange(AddrRange(addr, size));
2562        }
2563        else if (addr > block->base() && addr + size == block->base() + block->size()) {
2564          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
2565          assert(b, "");
2566          shmbk_register(b);
2567          block->setAddrRange(AddrRange(addr, size));
2568        }
2569        else {
2570          fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
2571          shmbk_dump_info();
2572          assert(false, "invalid mmap range");
2573          return false;
2574        }
2575      }
2576      else {
2577        // Release only the same range. No partial release allowed.
2578        // Soften the requirement a bit, because the user may think they own a smaller size
2579        // than the block actually has, due to alignment etc.
2580        if (block->base() != addr || block->size() < size) {
2581          fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
2582          shmbk_dump_info();
2583          assert(false, "invalid shmget range");
2584          return false;
2585        }
2586      }
2587    }
2588    rc = block->release();
2589    assert(rc, "release failed");
2590    // remove block from bookkeeping
2591    shmbk_unregister(block);
2592    delete block;
2593  UNLOCK_SHMBK
2594
2595  if (!rc) {
2596    warning("failed to release %lu bytes at 0x%p", size, addr);
2597  }
2598
2599  return rc;
2600}
2601
2602static bool checked_mprotect(char* addr, size_t size, int prot) {
2603
2604  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2605  // not tell me if protection failed when trying to protect an un-protectable range.
2606  //
2607  // This means if the memory was allocated using shmget/shmat, protection won't work
2608  // but mprotect will still return 0:
2609  //
2610  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2611
2612  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2613
2614  if (!rc) {
2615    const char* const s_errno = strerror(errno);
2616    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2617    return false;
2618  }
2619
2620  // mprotect success check
2621  //
2622  // Mprotect said it changed the protection but can I believe it?
2623  //
2624  // To be sure I need to check the protection afterwards. Try to
2625  // read from protected memory and check whether that causes a segfault.
2626  //
2627  if (!os::Aix::xpg_sus_mode()) {
2628
2629    if (StubRoutines::SafeFetch32_stub()) {
2630
2631      const bool read_protected =
2632        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2633         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2634
2635      if (prot & PROT_READ) {
2636        rc = !read_protected;
2637      } else {
2638        rc = read_protected;
2639      }
2640    }
2641  }
2642  if (!rc) {
2643    assert(false, "mprotect failed.");
2644  }
2645  return rc;
2646}
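// Note (illustrative, not part of the original sources): the SafeFetch32 probe
// above works because SafeFetch32(p, deflt) returns 'deflt' if reading *p
// faults. Probing twice with two different default values rules out the case
// where the memory really happens to contain one of the probe values:
//
//   read_protected <=> both probes came back with their defaults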
2647
2648// Set protections specified
2649bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2650  unsigned int p = 0;
2651  switch (prot) {
2652  case MEM_PROT_NONE: p = PROT_NONE; break;
2653  case MEM_PROT_READ: p = PROT_READ; break;
2654  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2655  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2656  default:
2657    ShouldNotReachHere();
2658  }
2659  // is_committed is unused.
2660  return checked_mprotect(addr, size, p);
2661}
2662
2663bool os::guard_memory(char* addr, size_t size) {
2664  return checked_mprotect(addr, size, PROT_NONE);
2665}
2666
2667bool os::unguard_memory(char* addr, size_t size) {
2668  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2669}
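// Illustrative round trip (not part of the original sources): stack guard
// pages, created via os::pd_create_stack_guard_pages() above, are just
// mprotect'ed regions:
//
//   os::guard_memory(base, os::vm_page_size());    // PROT_NONE
//   os::unguard_memory(base, os::vm_page_size());  // PROT_READ|WRITE|EXEC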
2670
2671// Large page support
2672
2673static size_t _large_page_size = 0;
2674
2675// Enable large page support if OS allows that.
2676void os::large_page_init() {
2677
2678  // Note: os::Aix::query_multipage_support must run first.
2679
2680  if (!UseLargePages) {
2681    return;
2682  }
2683
2684  if (!Aix::can_use_64K_pages()) {
2685    assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
2686    UseLargePages = false;
2687    return;
2688  }
2689
2690  if (!Aix::can_use_16M_pages() && Use16MPages) {
2691    fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
2692            "and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
2693  }
2694
2695  // Do not report 16M page alignment as part of os::_page_sizes if we are
2696  // explicitly forbidden from using 16M pages. Doing so would increase the
2697  // alignment the garbage collector calculates with, slightly increasing
2698  // heap usage. We should only pay for 16M alignment if we really want to
2699  // use 16M pages.
2700  if (Use16MPages && Aix::can_use_16M_pages()) {
2701    _large_page_size = SIZE_16M;
2702    _page_sizes[0] = SIZE_16M;
2703    _page_sizes[1] = SIZE_64K;
2704    _page_sizes[2] = SIZE_4K;
2705    _page_sizes[3] = 0;
2706  } else if (Aix::can_use_64K_pages()) {
2707    _large_page_size = SIZE_64K;
2708    _page_sizes[0] = SIZE_64K;
2709    _page_sizes[1] = SIZE_4K;
2710    _page_sizes[2] = 0;
2711  }
2712
2713  if (Verbose) {
2714    fprintf(stderr, "Default large page size is 0x%llX.\n", (unsigned long long) _large_page_size);
2715  }
2716} // end: os::large_page_init()
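// For reference, the resulting zero-terminated _page_sizes arrays from the
// code above (descending order):
//   16M pages usable and wanted: { SIZE_16M, SIZE_64K, SIZE_4K, 0 }
//   64K pages only:              { SIZE_64K, SIZE_4K, 0 }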
2717
2718char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2719  // "exec" is passed in but not used. Creating the shared image for
2720  // the code cache doesn't have an SHM_X executable permission to check.
2721  Unimplemented();
2722  return 0;
2723}
2724
2725bool os::release_memory_special(char* base, size_t bytes) {
2726  // detaching the SHM segment will also delete it, see reserve_memory_special()
2727  Unimplemented();
2728  return false;
2729}
2730
2731size_t os::large_page_size() {
2732  return _large_page_size;
2733}
2734
2735bool os::can_commit_large_page_memory() {
2736  // Well, sadly we cannot commit anything at all (see comment in
2737  // os::commit_memory), but we claim to, so we can make use of large pages.
2738  return true;
2739}
2740
2741bool os::can_execute_large_page_memory() {
2742  // We can do that
2743  return true;
2744}
2745
2746// Reserve memory at an arbitrary address, only if that area is
2747// available (and not reserved for something else).
2748char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2749
2750  bool use_mmap = false;
2751
2752  // mmap: smaller graining, no large page support
2753  // shm: large graining (256M), large page support, limited number of shm segments
2754  //
2755  // Prefer mmap wherever we either do not need large page support or have OS limits
2756
2757  if (!UseLargePages || bytes < SIZE_16M) {
2758    use_mmap = true;
2759  }
2760
2761  char* addr = NULL;
2762  if (use_mmap) {
2763    addr = reserve_mmaped_memory(bytes, requested_addr);
2764  } else {
2765    // shmat: wish address is mandatory, and do not try 16M pages here.
2766    shmatted_memory_info_t info;
2767    const int flags = RESSHM_WISHADDR_OR_FAIL;
2768    if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
2769      addr = info.addr;
2770    }
2771  }
2772
2773  return addr;
2774}
2775
2776size_t os::read(int fd, void *buf, unsigned int nBytes) {
2777  return ::read(fd, buf, nBytes);
2778}
2779
2780void os::naked_short_sleep(jlong ms) {
2781  struct timespec req;
2782
2783  assert(ms < 1000, "Un-interruptible sleep, short time use only");
2784  req.tv_sec = 0;
2785  if (ms > 0) {
2786    req.tv_nsec = (ms % 1000) * 1000000;
2787  }
2788  else {
2789    req.tv_nsec = 1;
2790  }
2791
2792  nanosleep(&req, NULL);
2793
2794  return;
2795}
2796
2797// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2798void os::infinite_sleep() {
2799  while (true) {    // sleep forever ...
2800    ::sleep(100);   // ... 100 seconds at a time
2801  }
2802}
2803
2804// Used to convert frequent JVM_Yield() to nops
2805bool os::dont_yield() {
2806  return DontYieldALot;
2807}
2808
2809void os::yield() {
2810  sched_yield();
2811}
2812
2813os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
2814
2815void os::yield_all() {
2816  // Yields to all threads, including threads with lower priorities
2817  // Threads on Linux all have the same priority. The Solaris style
2818  // os::yield_all() with nanosleep(1ms) is not necessary.
2819  sched_yield();
2820}
2821
2822////////////////////////////////////////////////////////////////////////////////
2823// thread priority support
2824
2825// From AIX manpage to pthread_setschedparam
2826// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2827//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2828//
2829// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2830// range from 40 to 80, where 40 is the least favored priority and 80
2831// is the most favored."
2832//
2833// (Actually, I doubt this even has an impact on AIX, as we do kernel
2834// scheduling there; however, this still leaves iSeries.)
2835//
2836// We use the same values for AIX and PASE.
2837int os::java_to_os_priority[CriticalPriority + 1] = {
2838  54,             // 0 Entry should never be used
2839
2840  55,             // 1 MinPriority
2841  55,             // 2
2842  56,             // 3
2843
2844  56,             // 4
2845  57,             // 5 NormPriority
2846  57,             // 6
2847
2848  58,             // 7
2849  58,             // 8
2850  59,             // 9 NearMaxPriority
2851
2852  60,             // 10 MaxPriority
2853
2854  60              // 11 CriticalPriority
2855};
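// Example mapping, read directly from the table above:
//
//   os::java_to_os_priority[NormPriority /* 5 */]  == 57
//   os::java_to_os_priority[MaxPriority  /* 10 */] == 60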
2856
2857OSReturn os::set_native_priority(Thread* thread, int newpri) {
2858  if (!UseThreadPriorities) return OS_OK;
2859  pthread_t thr = thread->osthread()->pthread_id();
2860  int policy = SCHED_OTHER;
2861  struct sched_param param;
2862  param.sched_priority = newpri;
2863  int ret = pthread_setschedparam(thr, policy, &param);
2864
2865  if (Verbose) {
2866    if (ret == 0) {
2867      fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
2868    } else {
2869      fprintf(stderr, "Could not change priority of thread %d to %d (error %d, %s)\n",
2870              (int)thr, newpri, ret, strerror(ret));
2871    }
2872  }
2873  return (ret == 0) ? OS_OK : OS_ERR;
2874}
2875
2876OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2877  if (!UseThreadPriorities) {
2878    *priority_ptr = java_to_os_priority[NormPriority];
2879    return OS_OK;
2880  }
2881  pthread_t thr = thread->osthread()->pthread_id();
2882  int policy = SCHED_OTHER;
2883  struct sched_param param;
2884  int ret = pthread_getschedparam(thr, &policy, &param);
2885  *priority_ptr = param.sched_priority;
2886
2887  return (ret == 0) ? OS_OK : OS_ERR;
2888}
2889
2890// Hint to the underlying OS that a task switch would not be good.
2891// Void return because it's a hint and can fail.
2892void os::hint_no_preempt() {}
2893
2894////////////////////////////////////////////////////////////////////////////////
2895// suspend/resume support
2896
2897//  the low-level signal-based suspend/resume support is a remnant from the
2898//  old VM-suspension that used to be for java-suspension, safepoints etc,
2899//  within hotspot. Now there is a single use-case for this:
2900//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2901//      that runs in the watcher thread.
2902//  The remaining code is greatly simplified from the more general suspension
2903//  code that used to be used.
2904//
2905//  The protocol is quite simple:
2906//  - suspend:
2907//      - sends a signal to the target thread
2908//      - polls the suspend state of the osthread using a yield loop
2909//      - target thread signal handler (SR_handler) sets suspend state
2910//        and blocks in sigsuspend until continued
2911//  - resume:
2912//      - sets target osthread state to continue
2913//      - sends signal to end the sigsuspend loop in the SR_handler
2914//
2915//  Note that the SR_lock plays no role in this suspend/resume protocol.
2916//
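//  Simplified state transitions (names abbreviated from os::SuspendResume::SR_*;
//  see do_suspend(), do_resume() and SR_handler() below):
//
//    RUNNING   --request_suspend()--> SUSPEND_REQUEST --SR_handler--> SUSPENDED
//    SUSPENDED --request_wakeup()---> WAKEUP_REQUEST  --SR_handler--> RUNNING
//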
2917
2918static void resume_clear_context(OSThread *osthread) {
2919  osthread->set_ucontext(NULL);
2920  osthread->set_siginfo(NULL);
2921}
2922
2923static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2924  osthread->set_ucontext(context);
2925  osthread->set_siginfo(siginfo);
2926}
2927
2928//
2929// Handler function invoked when a thread's execution is suspended or
2930// resumed. We have to be careful that only async-safe functions are
2931// called here (Note: most pthread functions are not async safe and
2932// should be avoided.)
2933//
2934// Note: sigwait() is a more natural fit than sigsuspend() from an
2935  // interface point of view, but sigwait() prevents the signal handler
2936// from being run. libpthread would get very confused by not having
2937// its signal handlers run and prevents sigwait()'s use with the
2938  // mutex granting signal.
2939//
2940// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2941//
2942static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2943  // Save and restore errno to avoid confusing native code with EINTR
2944  // after sigsuspend.
2945  int old_errno = errno;
2946
2947  Thread* thread = Thread::current();
2948  OSThread* osthread = thread->osthread();
2949  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2950
2951  os::SuspendResume::State current = osthread->sr.state();
2952  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2953    suspend_save_context(osthread, siginfo, context);
2954
2955    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2956    os::SuspendResume::State state = osthread->sr.suspended();
2957    if (state == os::SuspendResume::SR_SUSPENDED) {
2958      sigset_t suspend_set;  // signals for sigsuspend()
2959
2960      // get current set of blocked signals and unblock resume signal
2961      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2962      sigdelset(&suspend_set, SR_signum);
2963
2964      // wait here until we are resumed
2965      while (1) {
2966        sigsuspend(&suspend_set);
2967
2968        os::SuspendResume::State result = osthread->sr.running();
2969        if (result == os::SuspendResume::SR_RUNNING) {
2970          break;
2971        }
2972      }
2973
2974    } else if (state == os::SuspendResume::SR_RUNNING) {
2975      // request was cancelled, continue
2976    } else {
2977      ShouldNotReachHere();
2978    }
2979
2980    resume_clear_context(osthread);
2981  } else if (current == os::SuspendResume::SR_RUNNING) {
2982    // request was cancelled, continue
2983  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2984    // ignore
2985  } else {
2986    ShouldNotReachHere();
2987  }
2988
2989  errno = old_errno;
2990}
2991
2992
2993static int SR_initialize() {
2994  struct sigaction act;
2995  char *s;
2996  // Get signal number to use for suspend/resume
2997  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2998    int sig = ::strtol(s, 0, 10);
2999    if (sig > 0 && sig < NSIG) {
3000      SR_signum = sig;
3001    }
3002  }
3003
3004  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
3005        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
3006
3007  sigemptyset(&SR_sigset);
3008  sigaddset(&SR_sigset, SR_signum);
3009
3010  // Set up signal handler for suspend/resume.
3011  act.sa_flags = SA_RESTART|SA_SIGINFO;
3012  act.sa_handler = (void (*)(int)) SR_handler;
3013
3014  // SR_signum is blocked by default.
3015  // 4528190 - We also need to block pthread restart signal (32 on all
3016  // supported Linux platforms). Note that LinuxThreads need to block
3017  // this signal for all threads to work properly. So we don't have
3018  // to use hard-coded signal number when setting up the mask.
3019  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
3020
3021  if (sigaction(SR_signum, &act, 0) == -1) {
3022    return -1;
3023  }
3024
3025  // Save signal flag
3026  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
3027  return 0;
3028}
3029
3030static int SR_finalize() {
3031  return 0;
3032}
3033
3034static int sr_notify(OSThread* osthread) {
3035  int status = pthread_kill(osthread->pthread_id(), SR_signum);
3036  assert_status(status == 0, status, "pthread_kill");
3037  return status;
3038}
3039
3040// "Randomly" selected value for how long we want to spin
3041// before bailing out on suspending a thread, also how often
3042// we send a signal to a thread we want to resume
3043static const int RANDOMLY_LARGE_INTEGER = 1000000;
3044static const int RANDOMLY_LARGE_INTEGER2 = 100;
3045
3046  // Returns true on success and false on error - really, an error is fatal,
3047  // but this seems to be the normal response to library errors.
3048static bool do_suspend(OSThread* osthread) {
3049  assert(osthread->sr.is_running(), "thread should be running");
3050  // mark as suspended and send signal
3051
3052  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3053    // failed to switch, state wasn't running?
3054    ShouldNotReachHere();
3055    return false;
3056  }
3057
3058  if (sr_notify(osthread) != 0) {
3059    // try to cancel, switch to running
3060
3061    os::SuspendResume::State result = osthread->sr.cancel_suspend();
3062    if (result == os::SuspendResume::SR_RUNNING) {
3063      // cancelled
3064      return false;
3065    } else if (result == os::SuspendResume::SR_SUSPENDED) {
3066      // somehow managed to suspend
3067      return true;
3068    } else {
3069      ShouldNotReachHere();
3070      return false;
3071    }
3072  }
3073
3074  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3075
3076  for (int n = 0; !osthread->sr.is_suspended(); n++) {
3077    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
3078      os::yield_all();
3079    }
3080
3081    // timeout, try to cancel the request
3082    if (n >= RANDOMLY_LARGE_INTEGER) {
3083      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3084      if (cancelled == os::SuspendResume::SR_RUNNING) {
3085        return false;
3086      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3087        return true;
3088      } else {
3089        ShouldNotReachHere();
3090        return false;
3091      }
3092    }
3093  }
3094
3095  guarantee(osthread->sr.is_suspended(), "Must be suspended");
3096  return true;
3097}
3098
3099static void do_resume(OSThread* osthread) {
3100  //assert(osthread->sr.is_suspended(), "thread should be suspended");
3101
3102  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3103    // failed to switch to WAKEUP_REQUEST
3104    ShouldNotReachHere();
3105    return;
3106  }
3107
3108  while (!osthread->sr.is_running()) {
3109    if (sr_notify(osthread) == 0) {
3110      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
3111        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
3112          os::yield_all();
3113        }
3114      }
3115    } else {
3116      ShouldNotReachHere();
3117    }
3118  }
3119
3120  guarantee(osthread->sr.is_running(), "Must be running!");
3121}
3122
3123///////////////////////////////////////////////////////////////////////////////////
3124// signal handling (except suspend/resume)
3125
3126// This routine may be used by user applications as a "hook" to catch signals.
3127// The user-defined signal handler must pass unrecognized signals to this
3128// routine, and if it returns true (non-zero), then the signal handler must
3129// return immediately. If the flag "abort_if_unrecognized" is true, then this
3130  // routine will never return false (zero), but instead will execute a VM panic
3131  // routine to kill the process.
3132//
3133// If this routine returns false, it is OK to call it again. This allows
3134// the user-defined signal handler to perform checks either before or after
3135// the VM performs its own checks. Naturally, the user code would be making
3136// a serious error if it tried to handle an exception (such as a null check
3137// or breakpoint) that the VM was generating for its own correct operation.
3138//
3139// This routine may recognize any of the following kinds of signals:
3140//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
3141// It should be consulted by handlers for any of those signals.
3142//
3143// The caller of this routine must pass in the three arguments supplied
3144// to the function referred to in the "sa_sigaction" (not the "sa_handler")
3145// field of the structure passed to sigaction(). This routine assumes that
3146// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3147//
3148// Note that the VM will print warnings if it detects conflicting signal
3149// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3150//
3151extern "C" JNIEXPORT int
3152JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
3153
3154// Set thread signal mask (for some reason on AIX sigthreadmask() seems
3155// to be the thing to call; documentation is not terribly clear about whether
3156  // pthread_sigmask also works, and if it does, whether it does the same).
3157bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
3158  const int rc = ::pthread_sigmask(how, set, oset);
3159  // Return value semantics differ slightly in the error case:
3160  // pthread_sigmask returns the error number, sigthreadmask returns -1 and sets the global errno
3161  // (so pthread_sigmask is more thread-safe for error handling).
3162  // On success, both return 0.
3163  return rc == 0 ? true : false;
3164}
3165
3166// Function to unblock all signals which are, according
3167// to POSIX, typical program error signals. If they happen while being blocked,
3168// they typically will bring down the process immediately.
3169bool unblock_program_error_signals() {
3170  sigset_t set;
3171  ::sigemptyset(&set);
3172  ::sigaddset(&set, SIGILL);
3173  ::sigaddset(&set, SIGBUS);
3174  ::sigaddset(&set, SIGFPE);
3175  ::sigaddset(&set, SIGSEGV);
3176  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3177}
3178
3179// Renamed from 'signalHandler' to avoid collision with other shared libs.
3180void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
3181  assert(info != NULL && uc != NULL, "it must be old kernel");
3182
3183  // Never leave program error signals blocked;
3184  // on all our platforms they would bring down the process immediately when
3185  // getting raised while being blocked.
3186  unblock_program_error_signals();
3187
3188  JVM_handle_aix_signal(sig, info, uc, true);
3189}
3190
3191
3192// This boolean allows users to forward their own non-matching signals
3193// to JVM_handle_aix_signal, harmlessly.
3194bool os::Aix::signal_handlers_are_installed = false;
3195
3196// For signal-chaining
3197struct sigaction os::Aix::sigact[MAXSIGNUM];
3198unsigned int os::Aix::sigs = 0;
3199bool os::Aix::libjsig_is_loaded = false;
3200typedef struct sigaction *(*get_signal_t)(int);
3201get_signal_t os::Aix::get_signal_action = NULL;
3202
3203struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3204  struct sigaction *actp = NULL;
3205
3206  if (libjsig_is_loaded) {
3207    // Retrieve the old signal handler from libjsig
3208    actp = (*get_signal_action)(sig);
3209  }
3210  if (actp == NULL) {
3211    // Retrieve the preinstalled signal handler from jvm
3212    actp = get_preinstalled_handler(sig);
3213  }
3214
3215  return actp;
3216}
3217
3218static bool call_chained_handler(struct sigaction *actp, int sig,
3219                                 siginfo_t *siginfo, void *context) {
3220  // Call the old signal handler
3221  if (actp->sa_handler == SIG_DFL) {
3222    // It's more reasonable to let jvm treat it as an unexpected exception
3223    // instead of taking the default action.
3224    return false;
3225  } else if (actp->sa_handler != SIG_IGN) {
3226    if ((actp->sa_flags & SA_NODEFER) == 0) {
3227      // automatically block the signal
3228      sigaddset(&(actp->sa_mask), sig);
3229    }
3230
3231    sa_handler_t hand = NULL;
3232    sa_sigaction_t sa = NULL;
3233    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3234    // retrieve the chained handler
3235    if (siginfo_flag_set) {
3236      sa = actp->sa_sigaction;
3237    } else {
3238      hand = actp->sa_handler;
3239    }
3240
3241    if ((actp->sa_flags & SA_RESETHAND) != 0) {
3242      actp->sa_handler = SIG_DFL;
3243    }
3244
3245    // try to honor the signal mask
3246    sigset_t oset;
3247    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3248
3249    // call into the chained handler
3250    if (siginfo_flag_set) {
3251      (*sa)(sig, siginfo, context);
3252    } else {
3253      (*hand)(sig);
3254    }
3255
3256    // restore the signal mask
3257    pthread_sigmask(SIG_SETMASK, &oset, 0);
3258  }
3259  // Tell jvm's signal handler the signal is taken care of.
3260  return true;
3261}
3262
3263bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3264  bool chained = false;
3265  // signal-chaining
3266  if (UseSignalChaining) {
3267    struct sigaction *actp = get_chained_signal_action(sig);
3268    if (actp != NULL) {
3269      chained = call_chained_handler(actp, sig, siginfo, context);
3270    }
3271  }
3272  return chained;
3273}
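// For illustration: signal chaining relies on libjsig interposing the
// sigaction() calls made by user code. A conceptual sketch of such an
// interposer (hypothetical names, not the actual libjsig source):
//
//   extern "C" int sigaction(int sig, const struct sigaction* act,
//                            struct sigaction* oact) {
//     if (vm_owns_signal(sig) && act != NULL) {   // hypothetical check
//       if (oact != NULL) *oact = saved_act[sig];
//       saved_act[sig] = *act;  // defer the user handler, do not install it
//       return 0;
//     }
//     return real_sigaction(sig, act, oact);      // hypothetical passthrough
//   }
//
// The deferred handler is what (*get_signal_action)(sig) retrieves in
// get_chained_signal_action() above.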
3274
3275struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3276  if ((((unsigned int)1 << sig) & sigs) != 0) {
3277    return &sigact[sig];
3278  }
3279  return NULL;
3280}
3281
3282void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3283  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3284  sigact[sig] = oldAct;
3285  sigs |= (unsigned int)1 << sig;
3286}
3287
3288// for diagnostics
3289int os::Aix::sigflags[MAXSIGNUM];
3290
3291int os::Aix::get_our_sigflags(int sig) {
3292  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3293  return sigflags[sig];
3294}
3295
3296void os::Aix::set_our_sigflags(int sig, int flags) {
3297  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3298  sigflags[sig] = flags;
3299}
3300
3301void os::Aix::set_signal_handler(int sig, bool set_installed) {
3302  // Check for overwrite.
3303  struct sigaction oldAct;
3304  sigaction(sig, (struct sigaction*)NULL, &oldAct);
3305
3306  void* oldhand = oldAct.sa_sigaction
3307    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3308    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3309  // Renamed 'signalHandler' to avoid collision with other shared libs.
3310  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3311      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3312      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3313    if (AllowUserSignalHandlers || !set_installed) {
3314      // Do not overwrite; user takes responsibility to forward to us.
3315      return;
3316    } else if (UseSignalChaining) {
3317      // save the old handler in jvm
3318      save_preinstalled_handler(sig, oldAct);
3319      // libjsig also interposes the sigaction() call below and saves the
3320      // old sigaction on its own.
3321    } else {
3322      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3323                    "%#lx for signal %d.", (long)oldhand, sig));
3324    }
3325  }
3326
3327  struct sigaction sigAct;
3328  sigfillset(&(sigAct.sa_mask));
3329  if (!set_installed) {
3330    sigAct.sa_handler = SIG_DFL;
3331    sigAct.sa_flags = SA_RESTART;
3332  } else {
3333    // Renamed 'signalHandler' to avoid collision with other shared libs.
3334    sigAct.sa_sigaction = javaSignalHandler;
3335    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3336  }
3337  // Save the flags we are about to set.
3338  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3339  sigflags[sig] = sigAct.sa_flags;
3340
3341  int ret = sigaction(sig, &sigAct, &oldAct);
3342  assert(ret == 0, "check");
3343
3344  void* oldhand2 = oldAct.sa_sigaction
3345                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3346                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3347  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3348}
3349
3350// install signal handlers for signals that HotSpot needs to
3351// handle in order to support Java-level exception handling.
3352void os::Aix::install_signal_handlers() {
3353  if (!signal_handlers_are_installed) {
3354    signal_handlers_are_installed = true;
3355
3356    // signal-chaining
3357    typedef void (*signal_setting_t)();
3358    signal_setting_t begin_signal_setting = NULL;
3359    signal_setting_t end_signal_setting = NULL;
3360    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3361                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3362    if (begin_signal_setting != NULL) {
3363      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3364                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3365      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3366                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3367      libjsig_is_loaded = true;
3368      assert(UseSignalChaining, "should enable signal-chaining");
3369    }
3370    if (libjsig_is_loaded) {
3371      // Tell libjsig jvm is setting signal handlers
3372      (*begin_signal_setting)();
3373    }
3374
3375    set_signal_handler(SIGSEGV, true);
3376    set_signal_handler(SIGPIPE, true);
3377    set_signal_handler(SIGBUS, true);
3378    set_signal_handler(SIGILL, true);
3379    set_signal_handler(SIGFPE, true);
3380    set_signal_handler(SIGTRAP, true);
3381    set_signal_handler(SIGXFSZ, true);
3382    set_signal_handler(SIGDANGER, true);
3383
3384    if (libjsig_is_loaded) {
3385      // Tell libjsig jvm finishes setting signal handlers
3386      (*end_signal_setting)();
3387    }
3388
3389    // We do not activate the signal checker if libjsig is in place, since we trust
3390    // ourselves, and if a user signal handler is installed all bets are off anyway.
3391    // Log that signal checking is off only if -verbose:jni is specified.
3392    if (CheckJNICalls) {
3393      if (libjsig_is_loaded) {
3394        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3395        check_signals = false;
3396      }
3397      if (AllowUserSignalHandlers) {
3398        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3399        check_signals = false;
3400      }
3401      // need to initialize check_signal_done
3402      ::sigemptyset(&check_signal_done);
3403    }
3404  }
3405}
3406
3407static const char* get_signal_handler_name(address handler,
3408                                           char* buf, int buflen) {
3409  int offset;
3410  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3411  if (found) {
3412    // skip directory names
3413    const char *p1, *p2;
3414    p1 = buf;
3415    size_t len = strlen(os::file_separator());
3416    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3417    // The way os::dll_address_to_library_name is implemented on Aix
3418    // right now, it always returns -1 for the offset, which is not
3419    // terribly informative.
3420    // Will fix that. For now, omit the offset.
3421    jio_snprintf(buf, buflen, "%s", p1);
3422  } else {
3423    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3424  }
3425  return buf;
3426}
3427
3428static void print_signal_handler(outputStream* st, int sig,
3429                                 char* buf, size_t buflen) {
3430  struct sigaction sa;
3431  sigaction(sig, NULL, &sa);
3432
3433  st->print("%s: ", os::exception_name(sig, buf, buflen));
3434
3435  address handler = (sa.sa_flags & SA_SIGINFO)
3436    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3437    : CAST_FROM_FN_PTR(address, sa.sa_handler);
3438
3439  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3440    st->print("SIG_DFL");
3441  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3442    st->print("SIG_IGN");
3443  } else {
3444    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3445  }
3446
3447  // Print readable mask.
3448  st->print(", sa_mask[0]=");
3449  os::Posix::print_signal_set_short(st, &sa.sa_mask);
3450
3451  address rh = VMError::get_resetted_sighandler(sig);
3452  // Maybe the handler was reset by VMError?
3453  if (rh != NULL) {
3454    handler = rh;
3455    sa.sa_flags = VMError::get_resetted_sigflags(sig);
3456  }
3457
3458  // Print textual representation of sa_flags.
3459  st->print(", sa_flags=");
3460  os::Posix::print_sa_flags(st, sa.sa_flags);
3461
3462  // Check: is it our handler?
3463  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3464      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3465    // It is our signal handler.
3466    // Check the flags; warn if they were changed by someone else.
3467    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3468      st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
3469                os::Aix::get_our_sigflags(sig));
3470    }
3471  }
3472  st->cr();
3473}
3474
3475
3476#define DO_SIGNAL_CHECK(sig) \
3477  if (!sigismember(&check_signal_done, sig)) \
3478    os::Aix::check_signal_handler(sig)
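// For illustration, DO_SIGNAL_CHECK(SIGSEGV); expands to:
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV);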
3479
3480// This method is a periodic task to check for misbehaving JNI applications
3481// under CheckJNI; we can add any other periodic checks here.
3482
3483void os::run_periodic_checks() {
3484
3485  if (check_signals == false) return;
3486
3487  // An overridden SEGV or BUS handler could potentially prevent
3488  // generation of hs*.log in the event of a crash, and debugging
3489  // such a case can be very challenging, so we absolutely
3490  // check the following for good measure:
3491  DO_SIGNAL_CHECK(SIGSEGV);
3492  DO_SIGNAL_CHECK(SIGILL);
3493  DO_SIGNAL_CHECK(SIGFPE);
3494  DO_SIGNAL_CHECK(SIGBUS);
3495  DO_SIGNAL_CHECK(SIGPIPE);
3496  DO_SIGNAL_CHECK(SIGXFSZ);
3497  if (UseSIGTRAP) {
3498    DO_SIGNAL_CHECK(SIGTRAP);
3499  }
3500  DO_SIGNAL_CHECK(SIGDANGER);
3501
3502  // ReduceSignalUsage allows the user to override these handlers
3503  // see comments at the very top and jvm_solaris.h
3504  if (!ReduceSignalUsage) {
3505    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3506    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3507    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3508    DO_SIGNAL_CHECK(BREAK_SIGNAL);
3509  }
3510
3511  DO_SIGNAL_CHECK(SR_signum);
3512  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3513}
3514
3515typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3516
3517static os_sigaction_t os_sigaction = NULL;
3518
3519void os::Aix::check_signal_handler(int sig) {
3520  char buf[O_BUFLEN];
3521  address jvmHandler = NULL;
3522
3523  struct sigaction act;
3524  if (os_sigaction == NULL) {
3525    // only trust the default sigaction, in case it has been interposed
3526    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3527    if (os_sigaction == NULL) return;
3528  }
3529
3530  os_sigaction(sig, (struct sigaction*)NULL, &act);
3531
3532  address thisHandler = (act.sa_flags & SA_SIGINFO)
3533    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3534    : CAST_FROM_FN_PTR(address, act.sa_handler);
3535
3536
3537  switch(sig) {
3538  case SIGSEGV:
3539  case SIGBUS:
3540  case SIGFPE:
3541  case SIGPIPE:
3542  case SIGILL:
3543  case SIGXFSZ:
3544    // Renamed 'signalHandler' to avoid collision with other shared libs.
3545    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3546    break;
3547
3548  case SHUTDOWN1_SIGNAL:
3549  case SHUTDOWN2_SIGNAL:
3550  case SHUTDOWN3_SIGNAL:
3551  case BREAK_SIGNAL:
3552    jvmHandler = (address)user_handler();
3553    break;
3554
3555  case INTERRUPT_SIGNAL:
3556    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3557    break;
3558
3559  default:
3560    if (sig == SR_signum) {
3561      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3562    } else {
3563      return;
3564    }
3565    break;
3566  }
3567
3568  if (thisHandler != jvmHandler) {
3569    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3570    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3571    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3572    // No need to check this sig any longer
3573    sigaddset(&check_signal_done, sig);
3574    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
3575    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3576      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3577                    exception_name(sig, buf, O_BUFLEN));
3578    }
3579  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3580    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3581    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3582    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3583    // No need to check this sig any longer
3584    sigaddset(&check_signal_done, sig);
3585  }
3586
3587  // Dump all the signal handlers.
3588  if (sigismember(&check_signal_done, sig)) {
3589    print_signal_handlers(tty, buf, O_BUFLEN);
3590  }
3591}
3592
3593extern bool signal_name(int signo, char* buf, size_t len);
3594
3595const char* os::exception_name(int exception_code, char* buf, size_t size) {
3596  if (0 < exception_code && exception_code <= SIGRTMAX) {
3597    // signal
3598    if (!signal_name(exception_code, buf, size)) {
3599      jio_snprintf(buf, size, "SIG%d", exception_code);
3600    }
3601    return buf;
3602  } else {
3603    return NULL;
3604  }
3605}
3606
3607// To install functions for atexit system call
3608extern "C" {
3609  static void perfMemory_exit_helper() {
3610    perfMemory_exit();
3611  }
3612}
3613
3614// This is called _before_ most of the global arguments have been parsed.
3615void os::init(void) {
3616  // This is basic; we want to know if that ever changes.
3617  // (the shared memory boundary is supposed to be 256M aligned)
3618  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3619
3620  // First off, we need to know whether we run on AIX or PASE, and
3621  // the OS level we run on.
3622  os::Aix::initialize_os_info();
3623
3624  // Scan environment (SPEC1170 behaviour, etc)
3625  os::Aix::scan_environment();
3626
3627  // Check which pages are supported by AIX.
3628  os::Aix::query_multipage_support();
3629
3630  // Next, we need to initialize libo4 and libperfstat libraries.
3631  if (os::Aix::on_pase()) {
3632    os::Aix::initialize_libo4();
3633  } else {
3634    os::Aix::initialize_libperfstat();
3635  }
3636
3637  // Reset the perfstat information provided by ODM.
3638  if (os::Aix::on_aix()) {
3639    libperfstat::perfstat_reset();
3640  }
3641
3642  // Now initialize basic system properties. Note that for some of the values we
3643  // need libperfstat etc.
3644  os::Aix::initialize_system_info();
3645
3646  // Initialize large page support.
3647  if (UseLargePages) {
3648    os::large_page_init();
3649    if (!UseLargePages) {
3650      // initialize os::_page_sizes
3651      _page_sizes[0] = Aix::page_size();
3652      _page_sizes[1] = 0;
3653      if (Verbose) {
3654        fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
3655      }
3656    }
3657  } else {
3658    // initialize os::_page_sizes
3659    _page_sizes[0] = Aix::page_size();
3660    _page_sizes[1] = 0;
3661  }
3662
3663  // debug trace
3664  if (Verbose) {
3665    fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
3666    fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
3667    fprintf(stderr, "os::_page_sizes = ( ");
3668    for (int i = 0; _page_sizes[i]; i ++) {
3669      fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
3670    }
3671    fprintf(stderr, ")\n");
3672  }
3673
3674  _initial_pid = getpid();
3675
3676  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3677
3678  init_random(1234567);
3679
3680  ThreadCritical::initialize();
3681
3682  // Main_thread points to the aboriginal thread.
3683  Aix::_main_thread = pthread_self();
3684
3685  initial_time_count = os::elapsed_counter();
3686  pthread_mutex_init(&dl_mutex, NULL);
3687}
3688
3689// this is called _after_ the global arguments have been parsed
3690jint os::init_2(void) {
3691
3692  if (Verbose) {
3693    fprintf(stderr, "processor count: %d\n", os::_processor_count);
3694    fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
3695  }
3696
3697  // initially build up the loaded dll map
3698  LoadedLibraries::reload();
3699
3700  const int page_size = Aix::page_size();
3701  const int map_size = page_size;
3702
3703  address map_address = (address) MAP_FAILED;
3704  const int prot  = PROT_READ;
3705  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3706
3707  // use optimized addresses for the polling page,
3708  // e.g. map it to a special 32-bit address.
3709  if (OptimizePollingPageLocation) {
3710    // architecture-specific list of address wishes:
3711    address address_wishes[] = {
3712      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3713      // PPC64: all address wishes are non-negative 32 bit values where
3714// the lower 16 bits are all zero. We can load these addresses
3715      // with a single ppc_lis instruction.
3716      (address) 0x30000000, (address) 0x31000000,
3717      (address) 0x32000000, (address) 0x33000000,
3718      (address) 0x40000000, (address) 0x41000000,
3719      (address) 0x42000000, (address) 0x43000000,
3720      (address) 0x50000000, (address) 0x51000000,
3721      (address) 0x52000000, (address) 0x53000000,
3722      (address) 0x60000000, (address) 0x61000000,
3723      (address) 0x62000000, (address) 0x63000000
3724    };
3725    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3726
3727    // iterate over the list of address wishes:
3728    for (int i=0; i<address_wishes_length; i++) {
3729      // try to map with current address wish.
3730      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3731      // fail if the address is already mapped.
3732      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3733                                     map_size, prot,
3734                                     flags | MAP_FIXED,
3735                                     -1, 0);
3736      if (Verbose) {
3737        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3738                address_wishes[i], map_address + (ssize_t)page_size);
3739      }
3740
3741      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3742        // map succeeded and map_address is at wished address, exit loop.
3743        break;
3744      }
3745
3746      if (map_address != (address) MAP_FAILED) {
3747        // map succeeded, but polling_page is not at wished address, unmap and continue.
3748        ::munmap(map_address, map_size);
3749        map_address = (address) MAP_FAILED;
3750      }
3751      // map failed, continue loop.
3752    }
3753  } // end OptimizePollingPageLocation
3754
3755  if (map_address == (address) MAP_FAILED) {
3756    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3757  }
3758  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3759  os::set_polling_page(map_address);
3760
3761  if (!UseMembar) {
3762    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3763    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3764    os::set_memory_serialize_page(mem_serialize_page);
3765
3766#ifndef PRODUCT
3767    if (Verbose && PrintMiscellaneous)
3768      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3769#endif
3770  }
3771
3772  // initialize suspend/resume support - must do this before signal_sets_init()
3773  if (SR_initialize() != 0) {
3774    perror("SR_initialize failed");
3775    return JNI_ERR;
3776  }
3777
3778  Aix::signal_sets_init();
3779  Aix::install_signal_handlers();
3780
3781  // Check minimum allowable stack size for thread creation and to initialize
3782  // the java system classes, including StackOverflowError - depends on page
3783  // size. Add a page for compiler2 recursion in main thread.
3784  // Add in 2*BytesPerWord times page size to account for VM stack during
3785  // class initialization depending on 32 or 64 bit VM.
3786  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3787            (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
3788                     2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
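  // Worked example (the parameter values are illustrative assumptions, not
  // verified AIX defaults): with a 4K page size, StackYellowPages=2,
  // StackRedPages=1, StackShadowPages=20, BytesPerWord=8 and COMPILER2
  // present, the floor is (2 + 1 + 20 + 2*8 + 1) * 4K = 40 * 4K = 160K.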
3789
3790  size_t threadStackSizeInBytes = ThreadStackSize * K;
3791  if (threadStackSizeInBytes != 0 &&
3792      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3793        tty->print_cr("\nThe stack size specified is too small, "
3794                      "Specify at least %dk",
3795                      os::Aix::min_stack_allowed / K);
3796        return JNI_ERR;
3797  }
3798
3799  // Make the stack size a multiple of the page size so that
3800  // the yellow/red zones can be guarded.
3801  // note that this can be 0, if no default stacksize was set
3802  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3803
3804  Aix::libpthread_init();
3805
3806  if (MaxFDLimit) {
3807    // Set the number of file descriptors to the maximum. Print an error
3808    // if getrlimit/setrlimit fails, but continue regardless.
3809    struct rlimit nbr_files;
3810    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3811    if (status != 0) {
3812      if (PrintMiscellaneous && (Verbose || WizardMode))
3813        perror("os::init_2 getrlimit failed");
3814    } else {
3815      nbr_files.rlim_cur = nbr_files.rlim_max;
3816      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3817      if (status != 0) {
3818        if (PrintMiscellaneous && (Verbose || WizardMode))
3819          perror("os::init_2 setrlimit failed");
3820      }
3821    }
3822  }
3823
3824  if (PerfAllowAtExitRegistration) {
3825    // only register atexit functions if PerfAllowAtExitRegistration is set.
3826    // atexit functions can be delayed until process exit time, which
3827    // can be problematic for embedded VM situations. Embedded VMs should
3828    // call DestroyJavaVM() to assure that VM resources are released.
3829
3830    // note: perfMemory_exit_helper atexit function may be removed in
3831    // the future if the appropriate cleanup code can be added to the
3832    // VM_Exit VMOperation's doit method.
3833    if (atexit(perfMemory_exit_helper) != 0) {
3834      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3835    }
3836  }
3837
3838  return JNI_OK;
3839}
3840
3841// this is called at the end of vm_initialization
3842void os::init_3(void) {
3843  return;
3844}
3845
3846// Mark the polling page as unreadable
3847void os::make_polling_page_unreadable(void) {
3848  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3849    fatal("Could not disable polling page");
3850  }
3851};
3852
3853// Mark the polling page as readable
3854void os::make_polling_page_readable(void) {
3855  // Changed according to os_linux.cpp.
3856  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3857    fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3858  }
3859};
3860
3861int os::active_processor_count() {
3862  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3863  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3864  return online_cpus;
3865}
3866
3867void os::set_native_thread_name(const char *name) {
3868  // Not yet implemented.
3869  return;
3870}
3871
3872bool os::distribute_processes(uint length, uint* distribution) {
3873  // Not yet implemented.
3874  return false;
3875}
3876
3877bool os::bind_to_processor(uint processor_id) {
3878  // Not yet implemented.
3879  return false;
3880}
3881
3882void os::SuspendedThreadTask::internal_do_task() {
3883  if (do_suspend(_thread->osthread())) {
3884    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3885    do_task(context);
3886    do_resume(_thread->osthread());
3887  }
3888}
3889
3890class PcFetcher : public os::SuspendedThreadTask {
3891public:
3892  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3893  ExtendedPC result();
3894protected:
3895  void do_task(const os::SuspendedThreadTaskContext& context);
3896private:
3897  ExtendedPC _epc;
3898};
3899
3900ExtendedPC PcFetcher::result() {
3901  guarantee(is_done(), "task is not done yet.");
3902  return _epc;
3903}
3904
3905void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3906  Thread* thread = context.thread();
3907  OSThread* osthread = thread->osthread();
3908  if (osthread->ucontext() != NULL) {
3909    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3910  } else {
3911    // NULL context is unexpected, double-check this is the VMThread.
3912    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3913  }
3914}
3915
3916// Suspends the target using the signal mechanism and then grabs the PC before
3917// resuming the target. Used by the flat-profiler only
3918ExtendedPC os::get_thread_pc(Thread* thread) {
3919  // Make sure that it is called by the watcher for the VMThread.
3920  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3921  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3922
3923  PcFetcher fetcher(thread);
3924  fetcher.run();
3925  return fetcher.result();
3926}
3927
3928// Not needed on Aix.
3929// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
3930// }
3931
3932////////////////////////////////////////////////////////////////////////////////
3933// debug support
3934
3935static address same_page(address x, address y) {
3936  intptr_t page_bits = -os::vm_page_size();
3937  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3938    return x;
3939  else if (x > y)
3940    return (address)(intptr_t(y) | ~page_bits) + 1;
3941  else
3942    return (address)(intptr_t(y) & page_bits);
3943}
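// Worked example for same_page(), assuming a 4K page size
// (page_bits == ~0xFFF):
//   same_page((address)0x2010, (address)0x2FF0) -> 0x2010 (same page)
//   same_page((address)0x3010, (address)0x2FF0) -> 0x3000 (x > y: one past
//                                                  the end of y's page)
//   same_page((address)0x1010, (address)0x2FF0) -> 0x2000 (x < y: start of
//                                                  y's page)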
3944
3945bool os::find(address addr, outputStream* st) {
3946
3947  st->print(PTR_FORMAT ": ", addr);
3948
3949  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3950  if (lib) {
3951    lib->print(st);
3952    return true;
3953  } else {
3954    lib = LoadedLibraries::find_for_data_address(addr);
3955    if (lib) {
3956      lib->print(st);
3957      return true;
3958    } else {
3959      st->print_cr("(outside any module)");
3960    }
3961  }
3962
3963  return false;
3964}
3965
3966////////////////////////////////////////////////////////////////////////////////
3967// misc
3968
3969// This does not do anything on Aix. This is basically a hook for being
3970// able to use structured exception handling (thread-local exception filters)
3971// on, e.g., Win32.
3972void
3973os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3974                         JavaCallArguments* args, Thread* thread) {
3975  f(value, method, args, thread);
3976}
3977
3978void os::print_statistics() {
3979}
3980
3981int os::message_box(const char* title, const char* message) {
3982  int i;
3983  fdStream err(defaultStream::error_fd());
3984  for (i = 0; i < 78; i++) err.print_raw("=");
3985  err.cr();
3986  err.print_raw_cr(title);
3987  for (i = 0; i < 78; i++) err.print_raw("-");
3988  err.cr();
3989  err.print_raw_cr(message);
3990  for (i = 0; i < 78; i++) err.print_raw("=");
3991  err.cr();
3992
3993  char buf[16];
3994  // Prevent process from exiting upon "read error" without consuming all CPU
3995  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3996
3997  return buf[0] == 'y' || buf[0] == 'Y';
3998}
3999
4000int os::stat(const char *path, struct stat *sbuf) {
4001  char pathbuf[MAX_PATH];
4002  if (strlen(path) > MAX_PATH - 1) {
4003    errno = ENAMETOOLONG;
4004    return -1;
4005  }
4006  os::native_path(strcpy(pathbuf, path));
4007  return ::stat(pathbuf, sbuf);
4008}
4009
4010bool os::check_heap(bool force) {
4011  return true;
4012}
4013
4014// int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
4015//   return ::vsnprintf(buf, count, format, args);
4016// }
4017
4018// Is a (classpath) directory empty?
4019bool os::dir_is_empty(const char* path) {
4020  DIR *dir = NULL;
4021  struct dirent *ptr;
4022
4023  dir = opendir(path);
4024  if (dir == NULL) return true;
4025
4026  /* Scan the directory */
4027  bool result = true;
4028  char buf[sizeof(struct dirent) + MAX_PATH];
4029  while (result && (ptr = ::readdir(dir)) != NULL) {
4030    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4031      result = false;
4032    }
4033  }
4034  closedir(dir);
4035  return result;
4036}
4037
4038// This code originates from JDK's sysOpen and open64_w
4039// from src/solaris/hpi/src/system_md.c
4040
4041#ifndef O_DELETE
4042#define O_DELETE 0x10000
4043#endif
4044
4045// Open a file. Unlink the file immediately after open returns
4046// if the specified oflag has the O_DELETE flag set.
4047// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
4048
4049int os::open(const char *path, int oflag, int mode) {
4050
4051  if (strlen(path) > MAX_PATH - 1) {
4052    errno = ENAMETOOLONG;
4053    return -1;
4054  }
4055  int fd;
4056  int o_delete = (oflag & O_DELETE);
4057  oflag = oflag & ~O_DELETE;
4058
4059  fd = ::open64(path, oflag, mode);
4060  if (fd == -1) return -1;
4061
4062  // If the open succeeded, the file might still be a directory.
4063  {
4064    struct stat64 buf64;
4065    int ret = ::fstat64(fd, &buf64);
4066    int st_mode = buf64.st_mode;
4067
4068    if (ret != -1) {
4069      if ((st_mode & S_IFMT) == S_IFDIR) {
4070        errno = EISDIR;
4071        ::close(fd);
4072        return -1;
4073      }
4074    } else {
4075      ::close(fd);
4076      return -1;
4077    }
4078  }
4079
4080  // All file descriptors that are opened in the JVM and not
4081  // specifically destined for a subprocess should have the
4082  // close-on-exec flag set. If we don't set it, then careless 3rd
4083  // party native code might fork and exec without closing all
4084  // appropriate file descriptors (e.g. as we do in closeDescriptors in
4085  // UNIXProcess.c), and this in turn might:
4086  //
4087  // - cause end-of-file to fail to be detected on some file
4088  //   descriptors, resulting in mysterious hangs, or
4089  //
4090  // - might cause an fopen in the subprocess to fail on a system
4091  //   suffering from bug 1085341.
4092  //
4093  // (Yes, the default setting of the close-on-exec flag is a Unix
4094  // design flaw.)
4095  //
4096  // See:
4097  // 1085341: 32-bit stdio routines should support file descriptors >255
4098  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4099  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4100#ifdef FD_CLOEXEC
4101  {
4102    int flags = ::fcntl(fd, F_GETFD);
4103    if (flags != -1)
4104      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4105  }
4106#endif
4107
4108  if (o_delete != 0) {
4109    ::unlink(path);
4110  }
4111  return fd;
4112}
4113
4114
4115// create binary file, rewriting existing file if required
4116int os::create_binary_file(const char* path, bool rewrite_existing) {
4117  int oflags = O_WRONLY | O_CREAT;
4118  if (!rewrite_existing) {
4119    oflags |= O_EXCL;
4120  }
4121  return ::open64(path, oflags, S_IREAD | S_IWRITE);
4122}
4123
4124// return current position of file pointer
4125jlong os::current_file_offset(int fd) {
4126  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4127}
4128
4129// move file pointer to the specified offset
4130jlong os::seek_to_file_offset(int fd, jlong offset) {
4131  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4132}
4133
4134// This code originates from JDK's sysAvailable
4135// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
4136
4137int os::available(int fd, jlong *bytes) {
4138  jlong cur, end;
4139  int mode;
4140  struct stat64 buf64;
4141
4142  if (::fstat64(fd, &buf64) >= 0) {
4143    mode = buf64.st_mode;
4144    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4145      // XXX: is the following call interruptible? If so, this might
4146      // need to go through the INTERRUPT_IO() wrapper as for other
4147      // blocking, interruptible calls in this file.
4148      int n;
4149      if (::ioctl(fd, FIONREAD, &n) >= 0) {
4150        *bytes = n;
4151        return 1;
4152      }
4153    }
4154  }
4155  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4156    return 0;
4157  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4158    return 0;
4159  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4160    return 0;
4161  }
4162  *bytes = end - cur;
4163  return 1;
4164}
4165
4166int os::socket_available(int fd, jint *pbytes) {
4167  // Linux doc says EINTR not returned, unlike Solaris
4168  int ret = ::ioctl(fd, FIONREAD, pbytes);
4169
4170  //%% note ioctl can return 0 when successful, JVM_SocketAvailable
4171  // is expected to return 0 on failure and 1 on success to the jdk.
4172  return (ret < 0) ? 0 : 1;
4173}
4174
4175// Map a block of memory.
4176char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4177                        char *addr, size_t bytes, bool read_only,
4178                        bool allow_exec) {
4179  Unimplemented();
4180  return NULL;
4181}
4182
4183
4184// Remap a block of memory.
4185char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4186                          char *addr, size_t bytes, bool read_only,
4187                          bool allow_exec) {
4188  // same as map_memory() on this OS
4189  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4190                        allow_exec);
4191}
4192
4193// Unmap a block of memory.
4194bool os::pd_unmap_memory(char* addr, size_t bytes) {
4195  return munmap(addr, bytes) == 0;
4196}
4197
4198// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4199// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4200// of a thread.
4201//
4202// current_thread_cpu_time() and thread_cpu_time(Thread*) return
4203// the fastest estimate available on the platform.
4204
4205jlong os::current_thread_cpu_time() {
4206  // return user + sys since the cost is the same
4207  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4208  assert(n >= 0, "negative CPU time");
4209  return n;
4210}
4211
4212jlong os::thread_cpu_time(Thread* thread) {
4213  // consistent with what current_thread_cpu_time() returns
4214  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4215  assert(n >= 0, "negative CPU time");
4216  return n;
4217}
4218
4219jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4220  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4221  assert(n >= 0, "negative CPU time");
4222  return n;
4223}
4224
4225static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4226  bool error = false;
4227
4228  jlong sys_time = 0;
4229  jlong user_time = 0;
4230
4231  // Reimplemented using getthrds64().
4232  //
4233  // It goes like this:
4234  // For the thread in question, get the kernel thread id. Then get the
4235  // kernel thread statistics using that id.
4236  //
4237  // This only works, of course, when no pthread scheduling is used,
4238  // i.e. when there is a 1:1 relationship to kernel threads.
4239  // On AIX, see the AIXTHREAD_SCOPE variable.
4240
4241  pthread_t pthtid = thread->osthread()->pthread_id();
4242
4243  // retrieve kernel thread id for the pthread:
4244  tid64_t tid = 0;
4245  struct __pthrdsinfo pinfo;
4246  // I just love those otherworldly IBM APIs which force me to hand down
4247  // dummy buffers for stuff I don't care for...
4248  char dummy[1];
4249  int dummy_size = sizeof(dummy);
4250  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4251                          dummy, &dummy_size) == 0) {
4252    tid = pinfo.__pi_tid;
4253  } else {
4254    tty->print_cr("pthread_getthrds_np failed.");
4255    error = true;
4256  }
4257
4258  // retrieve kernel timing info for that kernel thread
4259  if (!error) {
4260    struct thrdentry64 thrdentry;
4261    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4262      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4263      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4264    } else {
4265      tty->print_cr("pthread_getthrds_np failed.");
4266      error = true;
4267    }
4268  }
4269
4270  if (p_sys_time) {
4271    *p_sys_time = sys_time;
4272  }
4273
4274  if (p_user_time) {
4275    *p_user_time = user_time;
4276  }
4277
4278  if (error) {
4279    return false;
4280  }
4281
4282  return true;
4283}
4284
4285jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4286  jlong sys_time;
4287  jlong user_time;
4288
4289  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4290    return -1;
4291  }
4292
4293  return user_sys_cpu_time ? sys_time + user_time : user_time;
4294}
4295
4296void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4297  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4298  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4299  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4300  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4301}
4302
4303void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4304  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4305  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4306  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4307  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4308}
4309
4310bool os::is_thread_cpu_time_supported() {
4311  return true;
4312}
4313
4314// System loadavg support. Returns -1 if load average cannot be obtained.
4315// For now just return the system wide load average (no processor sets).
4316int os::loadavg(double values[], int nelem) {
4317
4318  // Implemented using libperfstat on AIX.
4319
4320  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4321  guarantee(values, "argument error");
4322
4323  if (os::Aix::on_pase()) {
4324    Unimplemented();
4325    return -1;
4326  } else {
4327    // AIX: use libperfstat
4328    //
4329    // See also:
4330    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4331    // /usr/include/libperfstat.h:
4332
4333    // Use get_cpuinfo, which is already AIX-version independent.
4334    os::Aix::cpuinfo_t ci;
4335    if (os::Aix::get_cpuinfo(&ci)) {
4336      for (int i = 0; i < nelem; i++) {
4337        values[i] = ci.loadavg[i];
4338      }
4339    } else {
4340      return -1;
4341    }
4342    return nelem;
4343  }
4344}
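// Usage sketch (illustrative): query the 1, 5 and 15 minute load averages.
//
//   double avg[3];
//   if (os::loadavg(avg, 3) == 3) {
//     // avg[0], avg[1] and avg[2] hold the 1, 5 and 15 minute averages.
//   }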
4345
4346void os::pause() {
4347  char filename[MAX_PATH];
4348  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4349    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4350  } else {
4351    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4352  }
4353
4354  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4355  if (fd != -1) {
4356    struct stat buf;
4357    ::close(fd);
4358    while (::stat(filename, &buf) == 0) {
4359      (void)::poll(NULL, 0, 100);
4360    }
4361  } else {
4362    jio_fprintf(stderr,
4363      "Could not open pause file '%s', continuing immediately.\n", filename);
4364  }
4365}
4366
4367bool os::Aix::is_primordial_thread() {
4368  if (pthread_self() == (pthread_t)1) {
4369    return true;
4370  } else {
4371    return false;
4372  }
4373}
4374
4375// OS recognition (PASE/AIX, OS level). Call this before calling any
4376// of the static functions Aix::on_pase() or Aix::os_version().
4377void os::Aix::initialize_os_info() {
4378
4379  assert(_on_pase == -1 && _os_version == -1, "already called.");
4380
4381  struct utsname uts;
4382  memset(&uts, 0, sizeof(uts));
4383  strcpy(uts.sysname, "?");
4384  if (::uname(&uts) == -1) {
4385    fprintf(stderr, "uname failed (%d)\n", errno);
4386    guarantee(0, "Could not determine whether we run on AIX or PASE");
4387  } else {
4388    if (Verbose) {
4389      fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4390              "node \"%s\" machine \"%s\"\n",
4391              uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4392    }
4393    const int major = atoi(uts.version);
4394    assert(major > 0, "invalid OS version");
4395    const int minor = atoi(uts.release);
4396    assert(minor > 0, "invalid OS release");
4397    _os_version = (major << 8) | minor;
4398    if (strcmp(uts.sysname, "OS400") == 0) {
4399      Unimplemented();
4400    } else if (strcmp(uts.sysname, "AIX") == 0) {
4401      // We run on AIX. We do not support versions older than AIX 5.3.
4402      _on_pase = 0;
4403      if (_os_version < 0x0503) {
4404        fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
4405        assert(false, "AIX release too old.");
4406      } else {
4407        if (Verbose) {
4408          fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
4409        }
4410      }
4411    } else {
4412      assert(false, "unknown OS");
4413    }
4414  }
4415
4416  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4417
4418} // end: os::Aix::initialize_os_info()
4419
4420// Scan the environment for important settings which might affect the VM.
4421// Trace out settings. Warn about invalid settings and/or correct them.
4422//
4423// Must run after os::Aix::initialize_os_info().
4424void os::Aix::scan_environment() {
4425
4426  char* p;
4427  int rc;
4428
4429  // Warn explicitly if EXTSHM=ON is used. That switch changes how
4430  // System V shared memory behaves. One effect is that the page size of
4431  // shared memory cannot be changed dynamically, effectively preventing
4432  // large pages from working.
4433  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4434  // recommendation is (in OSS notes) to switch it off.
4435  p = ::getenv("EXTSHM");
4436  if (Verbose) {
4437    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4438  }
4439  if (p && strcmp(p, "ON") == 0) {
4440    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4441    _extshm = 1;
4442  } else {
4443    _extshm = 0;
4444  }
4445
4446  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4447  // Not tested, not supported.
4448  //
4449  // Note that it might be worth the trouble to test and to require it, if only to
4450  // get useful return codes for mprotect.
4451  //
4452  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4453  // exec() ? before loading the libjvm ? ....)
4454  p = ::getenv("XPG_SUS_ENV");
4455  if (Verbose) {
4456    fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
4457  }
4458  if (p && strcmp(p, "ON") == 0) {
4459    _xpg_sus_mode = 1;
4460    fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
4461    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4462    // clobber address ranges. If we ever want to support that, we have to do some
4463    // testing first.
4464    guarantee(false, "XPG_SUS_ENV=ON not supported");
4465  } else {
4466    _xpg_sus_mode = 0;
4467  }
4468
4469  // Switch off AIX internal (pthread) guard pages. This has
4470  // immediate effect for any pthread_create calls which follow.
4471  p = ::getenv("AIXTHREAD_GUARDPAGES");
4472  if (Verbose) {
4473    fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
4474    fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
4475  }
4476  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4477  guarantee(rc == 0, "");
4478
4479} // end: os::Aix::scan_environment()
4480
4481// PASE: initialize the libo4 library (AS400 PASE porting library).
4482void os::Aix::initialize_libo4() {
4483  Unimplemented();
4484}
4485
4486// AIX: initialize the libperfstat library (we load this dynamically
4487// because it is only available on AIX).
4488void os::Aix::initialize_libperfstat() {
4489
4490  assert(os::Aix::on_aix(), "AIX only");
4491
4492  if (!libperfstat::init()) {
4493    fprintf(stderr, "libperfstat initialization failed.\n");
4494    assert(false, "libperfstat initialization failed");
4495  } else {
4496    if (Verbose) {
4497      fprintf(stderr, "libperfstat initialized.\n");
4498    }
4499  }
4500} // end: os::Aix::initialize_libperfstat
4501
4502/////////////////////////////////////////////////////////////////////////////
4503// thread stack
4504
4505// function to query the current stack size using pthread_getthrds_np
4506//
4507// ! do not change anything here unless you know what you are doing !
4508static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4509
4510  // This only works when invoked on a pthread. As we agreed not to use
4511  // primordial threads anyway, I assert that here.
4512  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4513
4514  // information about this api can be found (a) in the pthread.h header and
4515  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4516  //
4517  // The use of this API to find out the current stack is kind of undefined.
4518  // But after a lot of tries and asking IBM about it, I concluded that it is safe
4519  // enough for cases where I let the pthread library create its stacks. For cases
4520  // where I create my own stack and pass it to pthread_create, it seems not to
4521  // work (the returned stack size in that case is 0).
4522
4523  pthread_t tid = pthread_self();
4524  struct __pthrdsinfo pinfo;
4525  char dummy[1]; // we only need this to satisfy the api and to not get E
4526  int dummy_size = sizeof(dummy);
4527
4528  memset(&pinfo, 0, sizeof(pinfo));
4529
4530  const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4531                                      sizeof(pinfo), dummy, &dummy_size);
4532
4533  if (rc != 0) {
4534    fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
4535    guarantee(0, "pthread_getthrds_np failed");
4536  }
4537
4538  guarantee(pinfo.__pi_stackend, "returned stack base invalid");
4539
4540  // the following can happen when invoking pthread_getthrds_np on a pthread running on a user provided stack
4541  // (when handing down a stack to pthread create, see pthread_attr_setstackaddr).
4542  // Not sure what to do here - I feel inclined to forbid this use case completely.
4543  guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
4544
4545  // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
4546  if (p_stack_base) {
4547    (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
4548  }
4549
4550  if (p_stack_size) {
4551    (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
4552  }
4553
4554#ifndef PRODUCT
4555  if (Verbose) {
4556    fprintf(stderr,
4557            "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
4558            ", real stack_size=" INTPTR_FORMAT
4559            ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
4560            (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
4561            (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
4562            pinfo.__pi_stacksize - os::Aix::stack_page_size());
4563  }
4564#endif
4565
4566} // end query_stack_dimensions
4567
4568// get the current stack base from the OS (actually, the pthread library)
4569address os::current_stack_base() {
4570  address p;
4571  query_stack_dimensions(&p, 0);
4572  return p;
4573}
4574
4575// get the current stack size from the OS (actually, the pthread library)
4576size_t os::current_stack_size() {
4577  size_t s;
4578  query_stack_dimensions(0, &s);
4579  return s;
4580}
4581
4582// Refer to the comments in os_solaris.cpp park-unpark.
4583//
4584// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4585// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4586// For specifics regarding the bug see GLIBC BUGID 261237 :
4587//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4588// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4589// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4590// is used. (The simple C test-case provided in the GLIBC bug report manifests the
4591// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4592// and monitorenter when we're using 1-0 locking. All those operations may result in
4593// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4594// of libpthread avoids the problem, but isn't practical.
4595//
4596// Possible remedies:
4597//
4598// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4599//      This is palliative and probabilistic, however. If the thread is preempted
4600//      between the call to compute_abstime() and pthread_cond_timedwait(), more
4601//      than the minimum period may have passed, and the abstime may be stale (in the
4602//      past) resulting in a hang. Using this technique reduces the odds of a hang
4603//      but the JVM is still vulnerable, particularly on heavily loaded systems.
4604//
4605// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4606//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4607//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4608//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4609//      thread.
4610//
4611// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4612//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4613//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4614//      This also works well. In fact it avoids kernel-level scalability impediments
4615//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4616//      timers in a graceful fashion.
4617//
4618// 4.   When the abstime value is in the past it appears that control returns
4619//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4620//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4621//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4622//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4623//      It may be possible to avoid reinitialization by checking the return
4624//      value from pthread_cond_timedwait(). In addition to reinitializing the
4625//      condvar we must establish the invariant that cond_signal() is only called
4626//      within critical sections protected by the adjunct mutex. This prevents
4627//      cond_signal() from "seeing" a condvar that's in the midst of being
4628//      reinitialized or that is corrupt. Sadly, this invariant obviates the
4629//      desirable signal-after-unlock optimization that avoids futile context switching.
4630//
4631//      I'm also concerned that some versions of NPTL might allocate an auxiliary
4632//      structure when a condvar is used or initialized. cond_destroy() would
4633//      release the helper structure. Our reinitialize-after-timedwait fix
4634//      put excessive stress on malloc/free and locks protecting the c-heap.
4635//
4636// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
4637// It may be possible to refine (4) by checking the kernel and NPTL versions
4638// and only enabling the work-around for vulnerable environments.
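// A minimal sketch of remedy (4), assuming a WorkAroundNPTLTimedWaitHang-style
// flag (illustrative only, error handling elided):
//
//   int status = pthread_cond_timedwait(_cond, _mutex, &abst);
//   if (WorkAroundNPTLTimedWaitHang) {
//     // Re-initialize so a potentially corrupt condvar is never reused.
//     // This requires that cond_signal() only ever runs under the mutex.
//     pthread_cond_destroy(_cond);
//     pthread_cond_init(_cond, NULL);
//   }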
4639
4640// utility to compute the abstime argument to timedwait:
4641// millis is the relative timeout time
4642// abstime will be the absolute timeout time
4643// TODO: replace compute_abstime() with unpackTime()
4644
4645static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4646  if (millis < 0) millis = 0;
4647  struct timeval now;
4648  int status = gettimeofday(&now, NULL);
4649  assert(status == 0, "gettimeofday");
4650  jlong seconds = millis / 1000;
4651  millis %= 1000;
4652  if (seconds > 50000000) { // see man cond_timedwait(3T)
4653    seconds = 50000000;
4654  }
4655  abstime->tv_sec = now.tv_sec  + seconds;
4656  long       usec = now.tv_usec + millis * 1000;
4657  if (usec >= 1000000) {
4658    abstime->tv_sec += 1;
4659    usec -= 1000000;
4660  }
4661  abstime->tv_nsec = usec * 1000;
4662  return abstime;
4663}
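// Worked example: millis = 2500 with now = { tv_sec = 100, tv_usec = 900000 }
// gives seconds = 2 and a remainder of 500 ms; tv_sec becomes 102 and
// usec = 900000 + 500000 = 1400000 >= 1000000, so tv_sec is bumped to 103
// and usec to 400000, yielding { tv_sec = 103, tv_nsec = 400000000 }.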
4664
4665
4666// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4667// Conceptually TryPark() should be equivalent to park(0).
4668
4669int os::PlatformEvent::TryPark() {
4670  for (;;) {
4671    const int v = _Event;
4672    guarantee ((v == 0) || (v == 1), "invariant");
4673    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4674  }
4675}
4676
4677void os::PlatformEvent::park() {       // AKA "down()"
4678  // Invariant: Only the thread associated with the Event/PlatformEvent
4679  // may call park().
4680  // TODO: assert that _Assoc != NULL or _Assoc == Self
4681  int v;
4682  for (;;) {
4683    v = _Event;
4684    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4685  }
4686  guarantee (v >= 0, "invariant");
4687  if (v == 0) {
4688    // Do this the hard way by blocking ...
4689    int status = pthread_mutex_lock(_mutex);
4690    assert_status(status == 0, status, "mutex_lock");
4691    guarantee (_nParked == 0, "invariant");
4692    ++ _nParked;
4693    while (_Event < 0) {
4694      status = pthread_cond_wait(_cond, _mutex);
4695      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
4696    }
4697    -- _nParked;
4698
4699    // In theory we could move the ST of 0 into _Event past the unlock(),
4700    // but then we'd need a MEMBAR after the ST.
4701    _Event = 0;
4702    status = pthread_mutex_unlock(_mutex);
4703    assert_status(status == 0, status, "mutex_unlock");
4704  }
4705  guarantee (_Event >= 0, "invariant");
4706}
4707
4708int os::PlatformEvent::park(jlong millis) {
4709  guarantee (_nParked == 0, "invariant");
4710
4711  int v;
4712  for (;;) {
4713    v = _Event;
4714    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4715  }
4716  guarantee (v >= 0, "invariant");
4717  if (v != 0) return OS_OK;
4718
4719  // We do this the hard way, by blocking the thread.
4720  // Consider enforcing a minimum timeout value.
4721  struct timespec abst;
4722  compute_abstime(&abst, millis);
4723
4724  int ret = OS_TIMEOUT;
4725  int status = pthread_mutex_lock(_mutex);
4726  assert_status(status == 0, status, "mutex_lock");
4727  guarantee (_nParked == 0, "invariant");
4728  ++_nParked;
4729
4730  // Object.wait(timo) will return because of
4731  // (a) notification
4732  // (b) timeout
4733  // (c) thread.interrupt
4734  //
4735  // Thread.interrupt and object.notify{All} both call Event::set.
4736  // That is, we treat thread.interrupt as a special case of notification.
4737// The underlying pthread_cond_timedwait implementation admits
4738  // spurious/premature wakeups, but the JLS/JVM spec prevents the
4739  // JVM from making those visible to Java code. As such, we must
4740// filter out spurious wakeups. We assume all ETIMEDOUT returns are valid.
4741  //
4742  // TODO: properly differentiate simultaneous notify+interrupt.
4743  // In that case, we should propagate the notify to another waiter.
4744
4745  while (_Event < 0) {
4746    status = pthread_cond_timedwait(_cond, _mutex, &abst);
4747    assert_status(status == 0 || status == ETIMEDOUT,
4748          status, "cond_timedwait");
4749    if (!FilterSpuriousWakeups) break;         // previous semantics
4750    if (status == ETIMEDOUT) break;
4751    // We consume and ignore EINTR and spurious wakeups.
4752  }
4753  --_nParked;
4754  if (_Event >= 0) {
4755     ret = OS_OK;
4756  }
4757  _Event = 0;
4758  status = pthread_mutex_unlock(_mutex);
4759  assert_status(status == 0, status, "mutex_unlock");
4760  assert (_nParked == 0, "invariant");
4761  return ret;
4762}
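// Note on the return value of the timed park() above: OS_OK means the event
// was set -- either a permit was consumed up front or unpark()/set() arrived
// during the wait; OS_TIMEOUT means the deadline expired first.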
4763
4764void os::PlatformEvent::unpark() {
4765  int v, AnyWaiters;
4766  for (;;) {
4767    v = _Event;
4768    if (v > 0) {
4769      // The LD of _Event could have been reordered or be satisfied
4770      // by a read-aside from this processor's write buffer.
4771      // To avoid problems execute a barrier and then
4772      // ratify the value.
4773      OrderAccess::fence();
4774      if (_Event == v) return;
4775      continue;
4776    }
4777    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4778  }
4779  if (v < 0) {
4780    // Wait for the thread associated with the event to vacate
4781    int status = pthread_mutex_lock(_mutex);
4782    assert_status(status == 0, status, "mutex_lock");
4783    AnyWaiters = _nParked;
4784
4785    if (AnyWaiters != 0) {
4786      // We intentionally signal while still holding the lock here;
4787      // see the note below about the signal-after-unlock alternative.
4788      status = pthread_cond_signal(_cond);
4789      assert_status(status == 0, status, "cond_signal");
4790    }
4791    // Mutex should be locked for pthread_cond_signal(_cond).
4792    status = pthread_mutex_unlock(_mutex);
4793    assert_status(status == 0, status, "mutex_unlock");
4794  }
4795
4796  // Note that some ports signal() _after_ dropping the lock for "immortal"
4797  // Events: that is safe and avoids a class of futile wakeups, at the cost
4798  // of an occasional premature return from cond_{timed}wait(); the spurious
4799  // wakeup is benign since the victim simply re-tests the condition and
4800  // re-parks itself. This port keeps the signal under the lock (see above).
4801}
4802
4803
4804// JSR166
4805// -------------------------------------------------------
4806
4807//
4808// The Solaris and Linux implementations of park/unpark are fairly
4809// conservative for now, but can be improved. They currently use a
4810// mutex/condvar pair, plus a count.
4811// Park decrements count if > 0, else does a condvar wait. Unpark
4812// sets count to 1 and signals condvar. Only one thread ever waits
4813// on the condvar. Contention seen when trying to park implies that someone
4814// is unparking you, so don't wait. And spurious returns are fine, so there
4815// is no need to track notifications.
4816//
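//
// A minimal sketch of that protocol (illustrative pseudocode, not the
// code compiled below -- the real implementations add safepoint,
// interrupt and timeout handling on top of it):
//
//   park():   lock(m);
//             if (_counter > 0) { _counter = 0; unlock(m); return; }
//             while (_counter == 0) wait(c, m); // blocks; may wake spuriously
//             _counter = 0; unlock(m);
//   unpark(): lock(m); _counter = 1; signal(c); unlock(m);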
4817
4818#define MAX_SECS 100000000
4819//
4820// This code is common to Linux and Solaris and will be moved to a
4821// common place in Dolphin (the JDK 7 release).
4822//
4823// The passed in time value is either a relative time in nanoseconds
4824// or an absolute time in milliseconds. Either way it has to be unpacked
4825// into suitable seconds and nanoseconds components and stored in the
4826// given timespec structure.
4827// Given that time is a 64-bit value and the time_t used in the timespec is
4828// only a signed-32-bit value (except on 64-bit Linux), we have to watch for
4829// overflow if times far in the future are given. Further, on Solaris versions
4830// prior to 10 there is a restriction (see cond_timedwait) that the specified
4831// number of seconds, in abstime, is less than current_time + 100,000,000.
4832// As it will be 28 years before "now + 100000000" will overflow we can
4833// ignore overflow and just impose a hard-limit on seconds using the value
4834// of "now + 100,000,000". This places a limit on the timeout of about 3.17
4835// years from "now".
4836//
4837
4838static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4839  assert (time > 0, "convertTime");
4840
4841  struct timeval now;
4842  int status = gettimeofday(&now, NULL);
4843  assert(status == 0, "gettimeofday");
4844
4845  time_t max_secs = now.tv_sec + MAX_SECS;
4846
4847  if (isAbsolute) {
4848    jlong secs = time / 1000;
4849    if (secs > max_secs) {
4850      absTime->tv_sec = max_secs;
4851    }
4852    else {
4853      absTime->tv_sec = secs;
4854    }
4855    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4856  }
4857  else {
4858    jlong secs = time / NANOSECS_PER_SEC;
4859    if (secs >= MAX_SECS) {
4860      absTime->tv_sec = max_secs;
4861      absTime->tv_nsec = 0;
4862    }
4863    else {
4864      absTime->tv_sec = now.tv_sec + secs;
4865      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4866      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4867        absTime->tv_nsec -= NANOSECS_PER_SEC;
4868        ++absTime->tv_sec; // note: this must be <= max_secs
4869      }
4870    }
4871  }
4872  assert(absTime->tv_sec >= 0, "tv_sec < 0");
4873  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4874  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4875  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4876}
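// Worked example (illustrative only): a relative timeout of 1.5 s, i.e.
// unpackTime(&ts, false, 1500000000), with now = { tv_sec = 1000,
// tv_usec = 600000 }:
//
//   secs    = 1500000000 / NANOSECS_PER_SEC = 1
//   tv_sec  = 1000 + 1 = 1001
//   tv_nsec = 500000000 + 600000 * 1000 = 1100000000 >= NANOSECS_PER_SEC
//   => tv_sec = 1002, tv_nsec = 100000000
//
// i.e. the deadline (1002.1 s) is exactly now (1000.6 s) plus 1.5 s.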
4877
4878void Parker::park(bool isAbsolute, jlong time) {
4879  // Optional fast-path check:
4880  // Return immediately if a permit is available.
4881  if (_counter > 0) {
4882      _counter = 0;
4883      OrderAccess::fence();
4884      return;
4885  }
4886
4887  Thread* thread = Thread::current();
4888  assert(thread->is_Java_thread(), "Must be JavaThread");
4889  JavaThread *jt = (JavaThread *)thread;
4890
4891  // Optional optimization -- avoid state transitions if there's an interrupt pending.
4892  // Check interrupt before trying to wait
4893  if (Thread::is_interrupted(thread, false)) {
4894    return;
4895  }
4896
4897  // Next, demultiplex/decode time arguments
4898  timespec absTime;
4899  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4900    return;
4901  }
4902  if (time > 0) {
4903    unpackTime(&absTime, isAbsolute, time);
4904  }
4905
4906
4907  // Enter safepoint region
4908  // Beware of deadlocks such as 6317397.
4909  // The per-thread Parker:: mutex is a classic leaf-lock.
4910  // In particular a thread must never block on the Threads_lock while
4911  // holding the Parker:: mutex. If safepoints are pending both the
4912  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4913  ThreadBlockInVM tbivm(jt);
4914
4915  // Don't wait if cannot get lock since interference arises from
4916  // unblocking. Also. check interrupt before trying wait
4917  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4918    return;
4919  }
4920
4921  int status;
4922  if (_counter > 0) { // no wait needed
4923    _counter = 0;
4924    status = pthread_mutex_unlock(_mutex);
4925    assert (status == 0, "invariant");
4926    OrderAccess::fence();
4927    return;
4928  }
4929
4930#ifdef ASSERT
4931  // Don't catch signals while blocked; let the running threads have the signals.
4932  // (This allows a debugger to break into the running thread.)
4933  sigset_t oldsigs;
4934  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4935  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4936#endif
4937
4938  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4939  jt->set_suspend_equivalent();
4940  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4941
4942  if (time == 0) {
4943    status = pthread_cond_wait (_cond, _mutex);
4944  } else {
4945    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4946    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4947      pthread_cond_destroy (_cond);
4948      pthread_cond_init    (_cond, NULL);
4949    }
4950  }
4951  assert_status(status == 0 || status == EINTR ||
4952                status == ETIME || status == ETIMEDOUT,
4953                status, "cond_timedwait");
4954
4955#ifdef ASSERT
4956  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4957#endif
4958
4959  _counter = 0;
4960  status = pthread_mutex_unlock(_mutex);
4961  assert_status(status == 0, status, "invariant");
4962  // If externally suspended while waiting, re-suspend
4963  if (jt->handle_special_suspend_equivalent_condition()) {
4964    jt->java_suspend_self();
4965  }
4966
4967  OrderAccess::fence();
4968}
4969
4970void Parker::unpark() {
4971  int s, status;
4972  status = pthread_mutex_lock(_mutex);
4973  assert (status == 0, "invariant");
4974  s = _counter;
4975  _counter = 1;
4976  if (s < 1) {
4977    if (WorkAroundNPTLTimedWaitHang) {
4978      status = pthread_cond_signal (_cond);
4979      assert (status == 0, "invariant");
4980      status = pthread_mutex_unlock(_mutex);
4981      assert (status == 0, "invariant");
4982    } else {
4983      status = pthread_mutex_unlock(_mutex);
4984      assert (status == 0, "invariant");
4985      status = pthread_cond_signal (_cond);
4986      assert (status == 0, "invariant");
4987    }
4988  } else {
4989    status = pthread_mutex_unlock(_mutex);
4990    assert (status == 0, "invariant");
4991  }
4992}
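// Illustrative pairing (hypothetical callers, not code from this file):
// a thread invoking its Parker's park(false, 0) blocks until some other
// thread calls unpark() on the same Parker; if unpark() runs first, the
// stored permit (_counter == 1) makes the next park() return immediately,
// and multiple unpark() calls still leave only a single permit.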
4993
4994
4995extern char** environ;
4996
4997// Run the specified command in a separate process. Return its exit value,
4998// or -1 on failure (e.g. can't fork a new process).
4999// Unlike system(), this function can be called from signal handler. It
5000// doesn't block SIGINT et al.
5001int os::fork_and_exec(char* cmd) {
5002  char* argv[4] = { (char*) "sh", (char*) "-c", cmd, NULL };
5003
5004  pid_t pid = fork();
5005
5006  if (pid < 0) {
5007    // fork failed
5008    return -1;
5009
5010  } else if (pid == 0) {
5011    // child process
5012
5013    // try to be consistent with system(), which uses "/usr/bin/sh" on AIX
5014    execve("/usr/bin/sh", argv, environ);
5015
5016    // execve failed
5017    _exit(-1);
5018
5019  } else  {
5020    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5021    // care about the actual exit code, for now.
5022
5023    int status;
5024
5025    // Wait for the child process to exit.  This returns immediately if
5026    // the child has already exited.
5027    while (waitpid(pid, &status, 0) < 0) {
5028        switch (errno) {
5029        case ECHILD: return 0;
5030        case EINTR: break;
5031        default: return -1;
5032        }
5033    }
5034
5035    if (WIFEXITED(status)) {
5036       // The child exited normally; get its exit code.
5037       return WEXITSTATUS(status);
5038    } else if (WIFSIGNALED(status)) {
5039       // The child exited because of a signal
5040       // The best value to return is 0x80 + signal number,
5041       // because that is what all Unix shells do, and because
5042       // it allows callers to distinguish between process exit and
5043       // process death by signal.
5044       return 0x80 + WTERMSIG(status);
5045    } else {
5046       // Unknown exit code; pass it through
5047       return status;
5048    }
5049  }
5050  // Remove warning.
5051  return -1;
5052}
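// Example (illustrative only): os::fork_and_exec((char*)"exit 3") returns 3,
// while a command terminated by SIGKILL (signal 9) yields 0x80 + 9 = 137,
// matching the exit-status convention of common Unix shells.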
5053
5054// is_headless_jre()
5055//
5056// Test for the existence of xawt/libmawt.so or libawt_xawt.so
5057// in order to report whether we are running in a headless JRE.
5058//
5059// Since JDK 8, xawt/libmawt.so has moved into the same directory
5060// as libawt.so and been renamed libawt_xawt.so.
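// For example (hypothetical path): if libjvm resides in
// /opt/jdk/jre/lib/ppc64/server/libjvm.so, the code below strips the two
// trailing path components and probes /opt/jdk/jre/lib/ppc64/xawt/libmawt.so,
// then /opt/jdk/jre/lib/ppc64/libawt_xawt.so; the JRE is reported headless
// only if neither library exists.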
5061bool os::is_headless_jre() {
5062  struct stat statbuf;
5063  char buf[MAXPATHLEN];
5064  char libmawtpath[MAXPATHLEN];
5065  const char *xawtstr  = "/xawt/libmawt.so";
5066  const char *new_xawtstr = "/libawt_xawt.so";
5067
5068  char *p;
5069
5070  // Get path to libjvm.so
5071  os::jvm_path(buf, sizeof(buf));
5072
5073  // Get rid of libjvm.so
5074  p = strrchr(buf, '/');
5075  if (p == NULL) return false;
5076  else *p = '\0';
5077
5078  // Get rid of client or server
5079  p = strrchr(buf, '/');
5080  if (p == NULL) return false;
5081  else *p = '\0';
5082
5083  // check xawt/libmawt.so
5084  strcpy(libmawtpath, buf);
5085  strcat(libmawtpath, xawtstr);
5086  if (::stat(libmawtpath, &statbuf) == 0) return false;
5087
5088  // check libawt_xawt.so
5089  strcpy(libmawtpath, buf);
5090  strcat(libmawtpath, new_xawtstr);
5091  if (::stat(libmawtpath, &statbuf) == 0) return false;
5092
5093  return true;
5094}
5095
5096// Get the default path to the core file
5097// Returns the length of the string written to the buffer.
5098int os::get_core_path(char* buffer, size_t bufferSize) {
5099  const char* p = get_current_directory(buffer, bufferSize);
5100
5101  if (p == NULL) {
5102    assert(p != NULL, "failed to get current directory");
5103    return 0;
5104  }
5105
5106  return strlen(buffer);
5107}
5108
5109#ifndef PRODUCT
5110void TestReserveMemorySpecial_test() {
5111  // No tests available for this platform
5112}
5113#endif
5114