os_aix.cpp revision 9846:f2e3963c296d
1/*
2 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2012, 2015 SAP AG. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26// According to the AIX OS doc #pragma alloca must be used
27// with C++ compiler before referencing the function alloca()
28#pragma alloca
29
30// no precompiled headers
31#include "classfile/classLoader.hpp"
32#include "classfile/systemDictionary.hpp"
33#include "classfile/vmSymbols.hpp"
34#include "code/icBuffer.hpp"
35#include "code/vtableStubs.hpp"
36#include "compiler/compileBroker.hpp"
37#include "interpreter/interpreter.hpp"
38#include "jvm_aix.h"
39#include "libo4.hpp"
40#include "libperfstat_aix.hpp"
41#include "libodm_aix.hpp"
42#include "loadlib_aix.hpp"
43#include "memory/allocation.inline.hpp"
44#include "memory/filemap.hpp"
45#include "misc_aix.hpp"
46#include "mutex_aix.inline.hpp"
47#include "oops/oop.inline.hpp"
48#include "os_aix.inline.hpp"
49#include "os_share_aix.hpp"
50#include "porting_aix.hpp"
51#include "prims/jniFastGetField.hpp"
52#include "prims/jvm.h"
53#include "prims/jvm_misc.hpp"
54#include "runtime/arguments.hpp"
55#include "runtime/atomic.inline.hpp"
56#include "runtime/extendedPC.hpp"
57#include "runtime/globals.hpp"
58#include "runtime/interfaceSupport.hpp"
59#include "runtime/java.hpp"
60#include "runtime/javaCalls.hpp"
61#include "runtime/mutexLocker.hpp"
62#include "runtime/objectMonitor.hpp"
63#include "runtime/orderAccess.inline.hpp"
64#include "runtime/os.hpp"
65#include "runtime/osThread.hpp"
66#include "runtime/perfMemory.hpp"
67#include "runtime/sharedRuntime.hpp"
68#include "runtime/statSampler.hpp"
69#include "runtime/stubRoutines.hpp"
70#include "runtime/thread.inline.hpp"
71#include "runtime/threadCritical.hpp"
72#include "runtime/timer.hpp"
73#include "runtime/vm_version.hpp"
74#include "services/attachListener.hpp"
75#include "services/runtimeService.hpp"
76#include "utilities/decoder.hpp"
77#include "utilities/defaultStream.hpp"
78#include "utilities/events.hpp"
79#include "utilities/growableArray.hpp"
80#include "utilities/vmError.hpp"
81
82// put OS-includes here (sorted alphabetically)
83#include <errno.h>
84#include <fcntl.h>
85#include <inttypes.h>
86#include <poll.h>
87#include <procinfo.h>
88#include <pthread.h>
89#include <pwd.h>
90#include <semaphore.h>
91#include <signal.h>
92#include <stdint.h>
93#include <stdio.h>
94#include <string.h>
95#include <unistd.h>
96#include <sys/ioctl.h>
97#include <sys/ipc.h>
98#include <sys/mman.h>
99#include <sys/resource.h>
100#include <sys/select.h>
101#include <sys/shm.h>
102#include <sys/socket.h>
103#include <sys/stat.h>
104#include <sys/sysinfo.h>
105#include <sys/systemcfg.h>
106#include <sys/time.h>
107#include <sys/times.h>
108#include <sys/types.h>
109#include <sys/utsname.h>
110#include <sys/vminfo.h>
111#include <sys/wait.h>
112
113// Missing prototypes for various system APIs.
114extern "C"
115int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
116
117#if !defined(_AIXVERSION_610)
118extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
119extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
120extern "C" int getargs   (procsinfo*, int, char*, int);
121#endif
122
123#define MAX_PATH (2 * K)
124
125// for timer info max values which include all bits
126#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
127// for multipage initialization error analysis (in 'g_multipage_error')
128#define ERROR_MP_OS_TOO_OLD                          100
129#define ERROR_MP_EXTSHM_ACTIVE                       101
130#define ERROR_MP_VMGETINFO_FAILED                    102
131#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
132
133// The semantics in this file are thus that codeptr_t is a *real code ptr*.
134// This means that any function taking codeptr_t as arguments will assume
135// a real codeptr and won't handle function descriptors (eg getFuncName),
136// whereas functions taking address as args will deal with function
137// descriptors (eg os::dll_address_to_library_name).
138typedef unsigned int* codeptr_t;
139
140// Typedefs for stackslots, stack pointers, pointers to op codes.
141typedef unsigned long stackslot_t;
142typedef stackslot_t* stackptr_t;
143
144// Query dimensions of the stack of the calling thread.
145static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
146static address resolve_function_descriptor_to_code_pointer(address p);
147
148// Function to check a given stack pointer against given stack limits.
149inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
150  if (((uintptr_t)sp) & 0x7) {
151    return false;
152  }
153  if (sp > stack_base) {
154    return false;
155  }
156  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
157    return false;
158  }
159  return true;
160}
161
162// Returns true if function is a valid codepointer.
163inline bool is_valid_codepointer(codeptr_t p) {
164  if (!p) {
165    return false;
166  }
167  if (((uintptr_t)p) & 0x3) {
168    return false;
169  }
170  if (LoadedLibraries::find_for_text_address(p, NULL) == NULL) {
171    return false;
172  }
173  return true;
174}
175
176// Macro to check a given stack pointer against given stack limits and to die if test fails.
177#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
178    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
179}
180
181// Macro to check the current stack pointer against given stacklimits.
182#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
183  address sp; \
184  sp = os::current_stack_pointer(); \
185  CHECK_STACK_PTR(sp, stack_base, stack_size); \
186}
187
188static void vmembk_print_on(outputStream* os);
189
190////////////////////////////////////////////////////////////////////////////////
191// global variables (for a description see os_aix.hpp)
192
193julong    os::Aix::_physical_memory = 0;
194
195pthread_t os::Aix::_main_thread = ((pthread_t)0);
196int       os::Aix::_page_size = -1;
197
198// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
199int       os::Aix::_on_pase = -1;
200
201// 0 = uninitialized, otherwise 32 bit number:
202//  0xVVRRTTSS
203//  VV - major version
204//  RR - minor version
205//  TT - tech level, if known, 0 otherwise
206//  SS - service pack, if known, 0 otherwise
207uint32_t  os::Aix::_os_version = 0;
208
209int       os::Aix::_stack_page_size = -1;
210
211// -1 = uninitialized, 0 - no, 1 - yes
212int       os::Aix::_xpg_sus_mode = -1;
213
214// -1 = uninitialized, 0 - no, 1 - yes
215int       os::Aix::_extshm = -1;
216
217////////////////////////////////////////////////////////////////////////////////
218// local variables
219
220static jlong    initial_time_count = 0;
221static int      clock_tics_per_sec = 100;
222static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
223static bool     check_signals      = true;
224static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
225static sigset_t SR_sigset;
226
227// Process break recorded at startup.
228static address g_brk_at_startup = NULL;
229
230// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
232// therefore should not be defined in AIX class.
233//
234// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
235// latter two (16M "large" resp. 16G "huge" pages) require special
236// setup and are normally not available.
237//
238// AIX supports multiple page sizes per process, for:
239//  - Stack (of the primordial thread, so not relevant for us)
240//  - Data - data, bss, heap, for us also pthread stacks
241//  - Text - text code
242//  - shared memory
243//
244// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
245// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
246//
247// For shared memory, page size can be set dynamically via
248// shmctl(). Different shared memory regions can have different page
249// sizes.
250//
// More information can be found at the IBM info center:
252//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
253//
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  // (size_t)-1 marks the size fields as "not yet queried"; see the
  // "do not call twice" guarantee in query_multipage_support().
  (size_t) -1,                // pagesize
  (size_t) -1,                // datapsize
  (size_t) -1,                // shmpsize
  (size_t) -1,                // pthr_stack_pagesize
  (size_t) -1,                // textpsize
  false, false,               // can_use_64K_pages, can_use_16M_pages
  0                           // error
};
272
273// We must not accidentally allocate memory close to the BRK - even if
274// that would work - because then we prevent the BRK segment from
275// growing which may result in a malloc OOM even though there is
276// enough memory. The problem only arises if we shmat() or mmap() at
277// a specific wish address, e.g. to place the heap in a
278// compressed-oops-friendly way.
279static bool is_close_to_brk(address a) {
280  assert0(g_brk_at_startup != NULL);
281  if (a >= g_brk_at_startup &&
282      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
283    return true;
284  }
285  return false;
286}
287
// Platform-independent entry point; delegates to the AIX-specific query.
julong os::available_memory() {
  return Aix::available_memory();
}
291
// Returns the amount of free real memory in bytes.
// Returns 0 on PASE and ULONG_MAX if the memory information could not
// be retrieved.
julong os::Aix::available_memory() {
  // Avoid expensive API call here, as returned value will always be 0 on PASE.
  if (os::Aix::on_pase()) {
    return 0x0LL;
  }
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    // Query failed - return ULONG_MAX rather than 0 so callers do not
    // mistake the failure for an out-of-memory condition.
    return ULONG_MAX;
  }
}
304
// Total physical memory in bytes, as cached by initialize_system_info().
julong os::physical_memory() {
  return Aix::physical_memory();
}
308
// Returns true if the process runs with elevated privileges, i.e. if the
// real and effective user or group ids differ (typical for setuid/setgid
// executables). Note: this does NOT test for uid == 0 (root) despite the
// historical naming.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    // Computed once and cached; subsequent calls return the cached value.
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}
320
321// Helper function, emulates disclaim64 using multiple 32bit disclaims
322// because we cannot use disclaim64() on AS/400 and old AIX releases.
323static bool my_disclaim64(char* addr, size_t size) {
324
325  if (size == 0) {
326    return true;
327  }
328
329  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
330  const unsigned int maxDisclaimSize = 0x40000000;
331
332  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
333  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
334
335  char* p = addr;
336
337  for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
338    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
339      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
340      return false;
341    }
342    p += maxDisclaimSize;
343  }
344
345  if (lastDisclaimSize > 0) {
346    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
347      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
348      return false;
349    }
350  }
351
352  return true;
353}
354
355// Cpu architecture string
356#if defined(PPC32)
357static char cpu_arch[] = "ppc";
358#elif defined(PPC64)
359static char cpu_arch[] = "ppc64";
360#else
361#error Add appropriate cpu_arch setting
362#endif
363
// Wrap the function "vmgetinfo" which is not available on older OS releases.
static int checked_vmgetinfo(void *out, int command, int arg) {
  // vmgetinfo() does not exist on PASE before OS/400 V6R1; calling it there
  // cannot work, so fail hard instead of returning garbage.
  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
  }
  return ::vmgetinfo(out, command, arg);
}
371
// Given an address, returns the size of the page backing that address.
// Falls back to 4K when the OS cannot be queried (old PASE) or the query
// fails.
size_t os::Aix::query_pagesize(void* addr) {

  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
    return SIZE_4K;
  }

  // VM_PAGE_INFO takes the address in pi.addr and, on success, fills in
  // pi.pagesize with the backing page size.
  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }
}
389
// Initializes the processor count and total physical memory globals.
// Called once during VM startup.
void os::Aix::initialize_system_info() {

  // Get the number of online(logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
  }
  // Note: mi is zeroed by get_meminfo() before filling, so on failure this
  // records 0 in a product build.
  _physical_memory = (julong) mi.real_total;
}
403
404// Helper function for tracing page sizes.
405static const char* describe_pagesize(size_t pagesize) {
406  switch (pagesize) {
407    case SIZE_4K : return "4K";
408    case SIZE_64K: return "64K";
409    case SIZE_16M: return "16M";
410    case SIZE_16G: return "16G";
411    default:
412      assert(false, "surprise");
413      return "??";
414  }
415}
416
417// Probe OS for multipage support.
418// Will fill the global g_multipage_support structure.
419// Must be called before calling os::large_page_init().
420static void query_multipage_support() {
421
422  guarantee(g_multipage_support.pagesize == -1,
423            "do not call twice");
424
425  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
426
427  // This really would surprise me.
428  assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
429
430  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
431  // Default data page size is defined either by linker options (-bdatapsize)
432  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
433  // default should be 4K.
434  {
435    void* p = ::malloc(SIZE_16M);
436    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
437    ::free(p);
438  }
439
440  // Query default shm page size (LDR_CNTRL SHMPSIZE).
441  // Note that this is pure curiosity. We do not rely on default page size but set
442  // our own page size after allocated.
443  {
444    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
445    guarantee(shmid != -1, "shmget failed");
446    void* p = ::shmat(shmid, NULL, 0);
447    ::shmctl(shmid, IPC_RMID, NULL);
448    guarantee(p != (void*) -1, "shmat failed");
449    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
450    ::shmdt(p);
451  }
452
453  // Before querying the stack page size, make sure we are not running as primordial
454  // thread (because primordial thread's stack may have different page size than
455  // pthread thread stacks). Running a VM on the primordial thread won't work for a
456  // number of reasons so we may just as well guarantee it here.
457  guarantee0(!os::Aix::is_primordial_thread());
458
459  // Query pthread stack page size. Should be the same as data page size because
460  // pthread stacks are allocated from C-Heap.
461  {
462    int dummy = 0;
463    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
464  }
465
466  // Query default text page size (LDR_CNTRL TEXTPSIZE).
467  {
468    address any_function =
469      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
470    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
471  }
472
473  // Now probe for support of 64K pages and 16M pages.
474
475  // Before OS/400 V6R1, there is no support for pages other than 4K.
476  if (os::Aix::on_pase_V5R4_or_older()) {
477    trcVerbose("OS/400 < V6R1 - no large page support.");
478    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
479    goto query_multipage_support_end;
480  }
481
482  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
483  {
484    const int MAX_PAGE_SIZES = 4;
485    psize_t sizes[MAX_PAGE_SIZES];
486    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
487    if (num_psizes == -1) {
488      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
489      trcVerbose("disabling multipage support.");
490      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
491      goto query_multipage_support_end;
492    }
493    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
494    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
495    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
496    for (int i = 0; i < num_psizes; i ++) {
497      trcVerbose(" %s ", describe_pagesize(sizes[i]));
498    }
499
500    // Can we use 64K, 16M pages?
501    for (int i = 0; i < num_psizes; i ++) {
502      const size_t pagesize = sizes[i];
503      if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
504        continue;
505      }
506      bool can_use = false;
507      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
508      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
509        IPC_CREAT | S_IRUSR | S_IWUSR);
510      guarantee0(shmid != -1); // Should always work.
511      // Try to set pagesize.
512      struct shmid_ds shm_buf = { 0 };
513      shm_buf.shm_pagesize = pagesize;
514      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
515        const int en = errno;
516        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
517        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
518          errno);
519      } else {
520        // Attach and double check pageisze.
521        void* p = ::shmat(shmid, NULL, 0);
522        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
523        guarantee0(p != (void*) -1); // Should always work.
524        const size_t real_pagesize = os::Aix::query_pagesize(p);
525        if (real_pagesize != pagesize) {
526          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
527        } else {
528          can_use = true;
529        }
530        ::shmdt(p);
531      }
532      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
533      if (pagesize == SIZE_64K) {
534        g_multipage_support.can_use_64K_pages = can_use;
535      } else if (pagesize == SIZE_16M) {
536        g_multipage_support.can_use_16M_pages = can_use;
537      }
538    }
539
540  } // end: check which pages can be used for shared memory
541
542query_multipage_support_end:
543
544  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
545      describe_pagesize(g_multipage_support.pagesize));
546  trcVerbose("Data page size (C-Heap, bss, etc): %s",
547      describe_pagesize(g_multipage_support.datapsize));
548  trcVerbose("Text page size: %s",
549      describe_pagesize(g_multipage_support.textpsize));
550  trcVerbose("Thread stack page size (pthread): %s",
551      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
552  trcVerbose("Default shared memory page size: %s",
553      describe_pagesize(g_multipage_support.shmpsize));
554  trcVerbose("Can use 64K pages dynamically with shared meory: %s",
555      (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
556  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
557      (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
558  trcVerbose("Multipage error details: %d",
559      g_multipage_support.error);
560
561  // sanity checks
562  assert0(g_multipage_support.pagesize == SIZE_4K);
563  assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
564  assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
565  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
566  assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
567
568}
569
// Derives and publishes the system properties that depend on the location
// of libjvm.so: dll_dir, java_home, boot class path, library path (from the
// user's LIBPATH plus an invariant default), and the extensions directory.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    // Each strrchr/'\0' pair below strips exactly one trailing path
    // component; the order of the chops is significant.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Only continue stripping if the previous chop succeeded (i.e. the
    // path had the expected depth).
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  // Note: buf still holds java_home here (set above); reuse it.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
639
640////////////////////////////////////////////////////////////////////////////////
641// breakpoint support
642
// Triggers a platform breakpoint (BREAKPOINT macro) for debugging.
void os::breakpoint() {
  BREAKPOINT;
}
646
// Intentionally empty; exists so a debugger can set a breakpoint on a
// stable, demangling-free symbol name.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
650
651////////////////////////////////////////////////////////////////////////////////
652// signal support
653
654debug_only(static bool signal_sets_initialized = false);
655static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
656
657bool os::Aix::is_sig_ignored(int sig) {
658  struct sigaction oact;
659  sigaction(sig, (struct sigaction*)NULL, &oact);
660  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
661    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
662  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
663    return true;
664  } else {
665    return false;
666  }
667}
668
// Populates the three static signal sets (unblocked_sigs, vm_sigs,
// allowdebug_blocked_sigs) used by hotspot_sigmask() and the accessors
// below. Must run once, before other threads are created.
void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   // Shutdown signals are only claimed if the application has not already
   // installed SIG_IGN for them (see is_sig_ignored).
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}
715
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  // Only valid after signal_sets_init() has populated the set.
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
722
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  // Only valid after signal_sets_init() has populated the set.
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
729
// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Aix::allowdebug_blocked_signals() {
  // Only valid after signal_sets_init() has populated the set.
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
735
// Installs the HotSpot signal mask on the given (current) thread:
// unblocks the VM-required signals, and blocks/unblocks BREAK_SIGNAL
// depending on whether this is the VM thread. The caller's original mask
// is saved in the OSThread so it can be restored later.
void os::Aix::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
757
// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
// On PASE the libo4 porting library is queried; on AIX proper, the
// (dynamically loaded) libperfstat is used. All values in *pmi are bytes.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    // Last argument 1 = request exactly one record.
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    // perfstat reports in units of 4 KB pages; convert to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
825
826//////////////////////////////////////////////////////////////////////////////
827// create new thread
828
// Thread start routine for all newly created threads.
// Runs on the new pthread; performs per-thread OS initialization
// (stack geometry, ids, signal mask, FPU state) and then enters
// Thread::run(). Returns only when the thread terminates.
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Pthread id for OSThread bookkeeping, kernel tid for diagnostics.
  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT
    ", stack %p ... %p, stacksize 0x%IX (%IB)",
    pthread_id, kernel_thread_id,
    thread->stack_base() - thread->stack_size(),
    thread->stack_base(),
    thread->stack_size(),
    thread->stack_size());

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    trcVerbose("Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  // alloca() shifts this frame's stack pointer by 0..7 cache lines (128 bytes each).
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  // The thread was created suspended; os::thread_start() resumes it into RUNNABLE.
  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  trcVerbose("Thread finished : pthread-id %u, ktid " UINT64_FORMAT ".",
    pthread_id, kernel_thread_id);

  return 0;
}
901
902bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
903
904  assert(thread->osthread() == NULL, "caller responsible");
905
906  // Allocate the OSThread object
907  OSThread* osthread = new OSThread(NULL, NULL);
908  if (osthread == NULL) {
909    return false;
910  }
911
912  // set the correct thread state
913  osthread->set_thread_type(thr_type);
914
915  // Initial state is ALLOCATED but not INITIALIZED
916  osthread->set_state(ALLOCATED);
917
918  thread->set_osthread(osthread);
919
920  // init thread attributes
921  pthread_attr_t attr;
922  pthread_attr_init(&attr);
923  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
924
925  // Make sure we run in 1:1 kernel-user-thread mode.
926  if (os::Aix::on_aix()) {
927    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
928    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
929  } // end: aix
930
931  // Start in suspended state, and in os::thread_start, wake the thread up.
932  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
933
934  // calculate stack size if it's not specified by caller
935  if (stack_size == 0) {
936    stack_size = os::Aix::default_stack_size(thr_type);
937
938    switch (thr_type) {
939    case os::java_thread:
940      // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
941      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
942      stack_size = JavaThread::stack_size_at_create();
943      break;
944    case os::compiler_thread:
945      if (CompilerThreadStackSize > 0) {
946        stack_size = (size_t)(CompilerThreadStackSize * K);
947        break;
948      } // else fall through:
949        // use VMThreadStackSize if CompilerThreadStackSize is not defined
950    case os::vm_thread:
951    case os::pgc_thread:
952    case os::cgc_thread:
953    case os::watcher_thread:
954      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
955      break;
956    }
957  }
958
959  stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
960  pthread_attr_setstacksize(&attr, stack_size);
961
962  pthread_t tid;
963  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
964
965  pthread_attr_destroy(&attr);
966
967  if (ret == 0) {
968    trcVerbose("Created New Thread : pthread-id %u", tid);
969  } else {
970    if (os::Aix::on_pase()) {
971      // QIBM_MULTI_THREADED=Y is needed when the launcher is started on iSeries
972      // using QSH. Otherwise pthread_create fails with errno=11.
973      trcVerbose("(Please make sure you set the environment variable "
974              "QIBM_MULTI_THREADED=Y before running this program.)");
975    }
976    if (PrintMiscellaneous && (Verbose || WizardMode)) {
977      perror("pthread_create()");
978    }
979    // Need to clean up stuff we've allocated so far
980    thread->set_osthread(NULL);
981    delete osthread;
982    return false;
983  }
984
985  // OSThread::thread_id is the pthread id.
986  osthread->set_thread_id(tid);
987
988  return true;
989}
990
991/////////////////////////////////////////////////////////////////////////////
992// attach existing thread
993
// bootstrap the main thread
// The primordial thread already exists; it only needs to be attached
// (must be called on the main thread itself, see assert).
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
999
// Attach an externally created (pre-existing) pthread to the VM:
// allocate and wire up its OSThread, initialize FPU state and the
// hotspot signal mask. The thread enters directly in RUNNABLE state.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
    pthread_id, kernel_thread_id,
    thread->stack_base() - thread->stack_size(),
    thread->stack_base(),
    thread->stack_size(),
    thread->stack_size());

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask (restored again in os::free_thread)
  os::Aix::hotspot_sigmask(thread);

  return true;
}
1052
// Resume a thread that was created suspended (see
// PTHREAD_CREATE_SUSPENDED_NP in os::create_thread).
void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}
1057
1058// Free OS resources related to the OSThread
1059void os::free_thread(OSThread* osthread) {
1060  assert(osthread != NULL, "osthread not set");
1061
1062  if (Thread::current()->osthread() == osthread) {
1063    // Restore caller's signal mask
1064    sigset_t sigmask = osthread->caller_sigmask();
1065    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
1066   }
1067
1068  delete osthread;
1069}
1070
1071////////////////////////////////////////////////////////////////////////////////
1072// time support
1073
// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
// elapsed_counter() ticks in microseconds, hence the * 1e-6.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}
1079
1080jlong os::elapsed_counter() {
1081  timeval time;
1082  int status = gettimeofday(&time, NULL);
1083  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1084}
1085
1086jlong os::elapsed_frequency() {
1087  return (1000 * 1000);
1088}
1089
// Virtual (per-thread CPU) time is supported via getrusage (see
// elapsedVTime below); it needs no explicit enabling.
bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
1093
1094double os::elapsedVTime() {
1095  struct rusage usage;
1096  int retval = getrusage(RUSAGE_THREAD, &usage);
1097  if (retval == 0) {
1098    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1099  } else {
1100    // better than nothing, but not much
1101    return elapsedTime();
1102  }
1103}
1104
// Wall clock time in milliseconds since the epoch (java.lang.System.currentTimeMillis).
jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}
1111
// Wall clock time split into whole seconds and nanoseconds-within-second.
// Resolution is limited to microseconds (gettimeofday).
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}
1119
// Nanosecond timer backing System.nanoTime().
// On PASE: derived from gettimeofday (microsecond resolution, not monotonic).
// On AIX: derived from the time base register via mread_real_time (monotonic).
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1148
// Describe the properties of the javaTimeNanos() clock for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}
1156
1157// Return the real, user, and system times in seconds from an
1158// arbitrary fixed point in the past.
1159bool os::getTimesSecs(double* process_real_time,
1160                      double* process_user_time,
1161                      double* process_system_time) {
1162  struct tms ticks;
1163  clock_t real_ticks = times(&ticks);
1164
1165  if (real_ticks == (clock_t) (-1)) {
1166    return false;
1167  } else {
1168    double ticks_per_second = (double) clock_tics_per_sec;
1169    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1170    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1171    *process_real_time = ((double) real_ticks) / ticks_per_second;
1172
1173    return true;
1174  }
1175}
1176
// Format the current local time as "YYYY-MM-DD HH:MM:SS" into buf.
// Returns buf. Uses the reentrant localtime_r.
char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}
1187
// Thread-safe localtime: delegates to the reentrant localtime_r.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
1191
1192////////////////////////////////////////////////////////////////////////////////
1193// runtime exit support
1194
// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook; run it last, after all other cleanup.
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1215
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// If dump_core is true, terminates via ::abort() to produce a core file;
// otherwise exits with status 1. Never returns.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}
1235
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}
1240
1241// This method is a copy of JDK's sysGetLastErrorString
1242// from src/solaris/hpi/src/system_md.c
1243
1244size_t os::lasterror(char *buf, size_t len) {
1245  if (errno == 0) return 0;
1246
1247  const char *s = ::strerror(errno);
1248  size_t n = ::strlen(s);
1249  if (n >= len) {
1250    n = len - 1;
1251  }
1252  ::strncpy(buf, s, n);
1253  buf[n] = '\0';
1254  return n;
1255}
1256
// Current thread id: the pthread id (see OSThread::set_thread_id above).
intx os::current_thread_id() {
  return (intx)pthread_self();
}

// Current process id.
int os::current_process_id() {
  return getpid();
}
1264
// DLL functions

// Shared libraries on AIX use the ".so" suffix (as loaded via dlopen below).
const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1272
1273static bool file_exists(const char* filename) {
1274  struct stat statbuf;
1275  if (filename == NULL || strlen(filename) == 0) {
1276    return false;
1277  }
1278  return os::stat(filename, &statbuf) == 0;
1279}
1280
// Build the platform library file name "lib<fname>.so" into buffer,
// optionally prefixed by a directory from pname. If pname is a
// path-separator list, the first directory containing the file wins.
// Returns true if a name was written, false on overflow or no match.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    // No directory prefix.
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path - probe each element for an existing file.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage allocated by split_path
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    // Single directory prefix; existence is not checked in this case.
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1328
1329// Check if addr is inside libjvm.so.
1330bool os::address_is_in_vm(address addr) {
1331
1332  // Input could be a real pc or a function pointer literal. The latter
1333  // would be a function descriptor residing in the data segment of a module.
1334  loaded_module_t lm;
1335  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1336    return lm.is_in_vm;
1337  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1338    return lm.is_in_vm;
1339  } else {
1340    return false;
1341  }
1342
1343}
1344
// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
    // It is a real code pointer.
    return p;
  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
    // Pointer to data segment, potential function descriptor.
    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
      // It is a function descriptor: its entry field points into text.
      return code_entry;
    }
  }

  // Neither a code pointer nor a resolvable descriptor.
  return NULL;
}
1368
// Map a code address (or function descriptor) to a function name.
// buf is mandatory and receives the (possibly demangled) name; offset,
// if given, receives the pc offset into the function (-1 on failure).
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset, demangle);
}
1388
// Fill p_name with the short name of the loaded module containing pc.
// Returns 0 whenever a name buffer was supplied - even if no module was
// found (p_name is then left empty) - and -1 only when p_name is absent.
// NOTE(review): p_errmsg is cleared but never written; callers pass 0.
static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: function name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // Pre-clear both output buffers.
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  if (p_name && namelen > 0) {
    loaded_module_t lm;
    if (LoadedLibraries::find_for_text_address(pc, &lm) != NULL) {
      strncpy(p_name, lm.shortname, namelen);
      p_name[namelen - 1] = '\0';
    }
    return 0;
  }

  return -1;
}
1412
1413bool os::dll_address_to_library_name(address addr, char* buf,
1414                                     int buflen, int* offset) {
1415  if (offset) {
1416    *offset = -1;
1417  }
1418  // Buf is not optional, but offset is optional.
1419  assert(buf != NULL, "sanity check");
1420  buf[0] = '\0';
1421
1422  // Resolve function ptr literals first.
1423  addr = resolve_function_descriptor_to_code_pointer(addr);
1424  if (!addr) {
1425    return false;
1426  }
1427
1428  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
1429    return true;
1430  }
1431  return false;
1432}
1433
1434// Loads .dll/.so and in case of error it checks if .dll/.so was built
1435// for the same architecture as Hotspot is running on.
1436void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1437
1438  if (ebuf && ebuflen > 0) {
1439    ebuf[0] = '\0';
1440    ebuf[ebuflen - 1] = '\0';
1441  }
1442
1443  if (!filename || strlen(filename) == 0) {
1444    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1445    return NULL;
1446  }
1447
1448  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1449  void * result= ::dlopen(filename, RTLD_LAZY);
1450  if (result != NULL) {
1451    // Reload dll cache. Don't do this in signal handling.
1452    LoadedLibraries::reload();
1453    return result;
1454  } else {
1455    // error analysis when dlopen fails
1456    const char* const error_report = ::dlerror();
1457    if (error_report && ebuf && ebuflen > 0) {
1458      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1459               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1460    }
1461  }
1462  return NULL;
1463}
1464
1465void* os::dll_lookup(void* handle, const char* name) {
1466  void* res = dlsym(handle, name);
1467  return res;
1468}
1469
// Handle for symbol lookup in the main program itself (dlopen(NULL)).
void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}
1473
// Print the list of loaded libraries (from the LoadedLibraries cache).
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1478
1479void os::get_summary_os_info(char* buf, size_t buflen) {
1480  // There might be something more readable than uname results for AIX.
1481  struct utsname name;
1482  uname(&name);
1483  snprintf(buf, buflen, "%s %s", name.release, name.version);
1484}
1485
1486void os::print_os_info(outputStream* st) {
1487  st->print("OS:");
1488
1489  st->print("uname:");
1490  struct utsname name;
1491  uname(&name);
1492  st->print(name.sysname); st->print(" ");
1493  st->print(name.nodename); st->print(" ");
1494  st->print(name.release); st->print(" ");
1495  st->print(name.version); st->print(" ");
1496  st->print(name.machine);
1497  st->cr();
1498
1499  uint32_t ver = os::Aix::os_version();
1500  st->print_cr("AIX kernel version %u.%u.%u.%u",
1501               (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1502
1503  // rlimit
1504  st->print("rlimit:");
1505  struct rlimit rlim;
1506
1507  st->print(" STACK ");
1508  getrlimit(RLIMIT_STACK, &rlim);
1509  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1510  else st->print("%uk", rlim.rlim_cur >> 10);
1511
1512  st->print(", CORE ");
1513  getrlimit(RLIMIT_CORE, &rlim);
1514  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1515  else st->print("%uk", rlim.rlim_cur >> 10);
1516
1517  st->print(", NPROC ");
1518  st->print("%d", sysconf(_SC_CHILD_MAX));
1519
1520  st->print(", NOFILE ");
1521  getrlimit(RLIMIT_NOFILE, &rlim);
1522  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1523  else st->print("%d", rlim.rlim_cur);
1524
1525  st->print(", AS ");
1526  getrlimit(RLIMIT_AS, &rlim);
1527  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1528  else st->print("%uk", rlim.rlim_cur >> 10);
1529
1530  // Print limits on DATA, because it limits the C-heap.
1531  st->print(", DATA ");
1532  getrlimit(RLIMIT_DATA, &rlim);
1533  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1534  else st->print("%uk", rlim.rlim_cur >> 10);
1535  st->cr();
1536
1537  // load average
1538  st->print("load average:");
1539  double loadavg[3] = {-1.L, -1.L, -1.L};
1540  os::loadavg(loadavg, 3);
1541  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1542  st->cr();
1543
1544  // print wpar info
1545  libperfstat::wparinfo_t wi;
1546  if (libperfstat::get_wparinfo(&wi)) {
1547    st->print_cr("wpar info");
1548    st->print_cr("name: %s", wi.name);
1549    st->print_cr("id:   %d", wi.wpar_id);
1550    st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1551  }
1552
1553  // print partition info
1554  libperfstat::partitioninfo_t pi;
1555  if (libperfstat::get_partitioninfo(&pi)) {
1556    st->print_cr("partition info");
1557    st->print_cr(" name: %s", pi.name);
1558  }
1559
1560}
1561
1562void os::print_memory_info(outputStream* st) {
1563
1564  st->print_cr("Memory:");
1565
1566  st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1567    describe_pagesize(g_multipage_support.pagesize));
1568  st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1569    describe_pagesize(g_multipage_support.datapsize));
1570  st->print_cr("  Text page size:                         %s",
1571    describe_pagesize(g_multipage_support.textpsize));
1572  st->print_cr("  Thread stack page size (pthread):       %s",
1573    describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1574  st->print_cr("  Default shared memory page size:        %s",
1575    describe_pagesize(g_multipage_support.shmpsize));
1576  st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1577    (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1578  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1579    (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1580  st->print_cr("  Multipage error: %d",
1581    g_multipage_support.error);
1582  st->cr();
1583  st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1584  // not used in OpenJDK st->print_cr("  os::stack_page_size:    %s", describe_pagesize(os::stack_page_size()));
1585
1586  // print out LDR_CNTRL because it affects the default page sizes
1587  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1588  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1589
1590  // Print out EXTSHM because it is an unsupported setting.
1591  const char* const extshm = ::getenv("EXTSHM");
1592  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1593  if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1594    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1595  }
1596
1597  // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1598  const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1599  st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1600      aixthread_guardpages ? aixthread_guardpages : "<unset>");
1601
1602  os::Aix::meminfo_t mi;
1603  if (os::Aix::get_meminfo(&mi)) {
1604    char buffer[256];
1605    if (os::Aix::on_aix()) {
1606      st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1607      st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1608      st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1609      st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1610    } else {
1611      // PASE - Numbers are result of QWCRSSTS; they mean:
1612      // real_total: Sum of all system pools
1613      // real_free: always 0
1614      // pgsp_total: we take the size of the system ASP
1615      // pgsp_free: size of system ASP times percentage of system ASP unused
1616      st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1617      st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1618      st->print_cr("%% system asp used : " SIZE_FORMAT,
1619        mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1620    }
1621    st->print_raw(buffer);
1622  }
1623  st->cr();
1624
1625  // Print segments allocated with os::reserve_memory.
1626  st->print_cr("internal virtual memory regions used by vm:");
1627  vmembk_print_on(st);
1628}
1629
1630// Get a string for the cpuinfo that is a summary of the cpu type
1631void os::get_summary_cpu_info(char* buf, size_t buflen) {
1632  // This looks good
1633  libperfstat::cpuinfo_t ci;
1634  if (libperfstat::get_cpuinfo(&ci)) {
1635    strncpy(buf, ci.version, buflen);
1636  } else {
1637    strncpy(buf, "AIX", buflen);
1638  }
1639}
1640
// Print a one-line CPU summary (count and feature string).
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash.
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::cpu_features());
  st->cr();
}
1649
1650static void print_signal_handler(outputStream* st, int sig,
1651                                 char* buf, size_t buflen);
1652
// Print the installed handlers for every signal the VM cares about
// (crash signals, suspend/resume, shutdown/break signals, SIGDANGER).
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  print_signal_handler(st, SIGDANGER, buf, buflen);
}
1669
1670static char saved_jvm_path[MAXPATHLEN] = {0};
1671
1672// Find the full path to the current module, libjvm.so.
1673void os::jvm_path(char *buf, jint buflen) {
1674  // Error checking.
1675  if (buflen < MAXPATHLEN) {
1676    assert(false, "must use a large-enough buffer");
1677    buf[0] = '\0';
1678    return;
1679  }
1680  // Lazy resolve the path to current module.
1681  if (saved_jvm_path[0] != 0) {
1682    strcpy(buf, saved_jvm_path);
1683    return;
1684  }
1685
1686  Dl_info dlinfo;
1687  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1688  assert(ret != 0, "cannot locate libjvm");
1689  char* rp = realpath((char *)dlinfo.dli_fname, buf);
1690  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1691
1692  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1693  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1694}
1695
// JNI symbol decoration for this platform: AIX uses plain C symbol
// names, so neither a prefix nor a suffix is emitted.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1703
1704////////////////////////////////////////////////////////////////////////////////
1705// sun.misc.Signal support
1706
// Number of SIGINTs seen so far (used to coalesce repeated Ctrl-C).
static volatile jint sigint_count = 0;

// Handler installed by os::signal() for signals forwarded to
// sun.misc.Signal; hands the signal number to the signal dispatcher.
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}
1725
// Return the address of the UserHandler above (for handler comparisons).
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
1729
1730extern "C" {
1731  typedef void (*sa_handler_t)(int);
1732  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1733}
1734
// Install 'handler' for 'signal_number' with SA_RESTART|SA_SIGINFO.
// Returns the previously installed handler, or (void*)-1 on failure.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1766
1767void os::signal_raise(int signal_number) {
1768  ::raise(signal_number);
1769}
1770
1771//
1772// The following code is moved from os.cpp for making this
1773// code platform specific, which it is by its very nature.
1774//
1775
1776// Will be modified when max signal is changed to be dynamic
1777int os::sigexitnum_pd() {
1778  return NSIG;
1779}
1780
// a counter for each possible signal value
// Incremented by os::signal_notify(), atomically decremented (cmpxchg) by
// check_pending_signals(). Valid indices are [0, NSIG].
static volatile jint pending_signals[NSIG+1] = { 0 };
1783
// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
// Semaphore used on AIX proper.
static sem_t sig_sem;
// Memory semaphore used on PASE; allocated in shared memory by local_sem_init().
static msemaphore* p_sig_msem = 0;
1792
1793static void local_sem_init() {
1794  if (os::Aix::on_aix()) {
1795    int rc = ::sem_init(&sig_sem, 0, 0);
1796    guarantee(rc != -1, "sem_init failed");
1797  } else {
1798    // Memory semaphores must live in shared mem.
1799    guarantee0(p_sig_msem == NULL);
1800    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1801    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1802    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1803  }
1804}
1805
1806static void local_sem_post() {
1807  static bool warn_only_once = false;
1808  if (os::Aix::on_aix()) {
1809    int rc = ::sem_post(&sig_sem);
1810    if (rc == -1 && !warn_only_once) {
1811      trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno));
1812      warn_only_once = true;
1813    }
1814  } else {
1815    guarantee0(p_sig_msem != NULL);
1816    int rc = ::msem_unlock(p_sig_msem, 0);
1817    if (rc == -1 && !warn_only_once) {
1818      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno));
1819      warn_only_once = true;
1820    }
1821  }
1822}
1823
1824static void local_sem_wait() {
1825  static bool warn_only_once = false;
1826  if (os::Aix::on_aix()) {
1827    int rc = ::sem_wait(&sig_sem);
1828    if (rc == -1 && !warn_only_once) {
1829      trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno));
1830      warn_only_once = true;
1831    }
1832  } else {
1833    guarantee0(p_sig_msem != NULL); // must init before use
1834    int rc = ::msem_lock(p_sig_msem, 0);
1835    if (rc == -1 && !warn_only_once) {
1836      trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno));
1837      warn_only_once = true;
1838    }
1839  }
1840}
1841
1842void os::signal_init_pd() {
1843  // Initialize signal structures
1844  ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1845
1846  // Initialize signal semaphore
1847  local_sem_init();
1848}
1849
1850void os::signal_notify(int sig) {
1851  Atomic::inc(&pending_signals[sig]);
1852  local_sem_post();
1853}
1854
// Scan pending_signals[] for a signal whose counter is > 0; atomically
// decrement it and return the signal number. If none is pending and 'wait'
// is false, return -1. If 'wait' is true, block on the signal semaphore
// (cooperating with the external-suspend protocol) and rescan when woken.
static int check_pending_signals(bool wait) {
  // Reset the Ctrl-C coalescing counter (see UserHandler).
  Atomic::store(0, &sigint_count);
  for (;;) {
    // Claim one pending occurrence of some signal, if any.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg guards against racing with other consumers/notifiers.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    // Block until os::signal_notify() posts the semaphore.
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        // Re-post so the wake-up is not lost, then honor the suspension.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1894
1895int os::signal_lookup() {
1896  return check_pending_signals(false);
1897}
1898
1899int os::signal_wait() {
1900  return check_pending_signals(true);
1901}
1902
1903////////////////////////////////////////////////////////////////////////////////
1904// Virtual Memory
1905
// We need to keep small simple bookkeeping for os::reserve_memory and friends.

// Values for vmembk_t::type - how the range was reserved.
#define VMEM_MAPPED  1
#define VMEM_SHMATED 2
1910
// Bookkeeping record for one reserved memory range (see vmembk_add/find/remove).
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;       // Start of the reserved range.
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;   // Next entry in the global singly-linked list (vmem.first).

  // True if p lies inside [addr, addr+size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // True if [p, p+s) lies entirely inside this block.
  // Note: callers pass s > 0; s == 0 would test p-1.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  // Guarantees (fatal) on violation.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1950
// Global list of memory bookkeeping records; 'first' is the list head,
// all access is serialized through 'cs'.
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;
1955
1956static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1957  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1958  assert0(p);
1959  if (p) {
1960    MiscUtils::AutoCritSect lck(&vmem.cs);
1961    p->addr = addr; p->size = size;
1962    p->pagesize = pagesize;
1963    p->type = type;
1964    p->next = vmem.first;
1965    vmem.first = p;
1966  }
1967}
1968
1969static vmembk_t* vmembk_find(char* addr) {
1970  MiscUtils::AutoCritSect lck(&vmem.cs);
1971  for (vmembk_t* p = vmem.first; p; p = p->next) {
1972    if (p->addr <= addr && (p->addr + p->size) > addr) {
1973      return p;
1974    }
1975  }
1976  return NULL;
1977}
1978
1979static void vmembk_remove(vmembk_t* p0) {
1980  MiscUtils::AutoCritSect lck(&vmem.cs);
1981  assert0(p0);
1982  assert0(vmem.first); // List should not be empty.
1983  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1984    if (*pp == p0) {
1985      *pp = p0->next;
1986      ::free(p0);
1987      return;
1988    }
1989  }
1990  assert0(false); // Not found?
1991}
1992
1993static void vmembk_print_on(outputStream* os) {
1994  MiscUtils::AutoCritSect lck(&vmem.cs);
1995  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1996    vmi->print_on(os);
1997    os->cr();
1998  }
1999}
2000
// Reserve and attach a section of System V memory.
// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
// address. Failing that, it will attach the memory anywhere.
// If <requested_addr> is NULL, function will attach the memory anywhere.
//
// <alignment_hint> is being ignored by this function. It is very probable however that the
// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this be not enough, we can put more work into it.
//
// Returns the attach address, or NULL on failure. Successful reservations
// are recorded via vmembk_add() with the real page size.
static char* reserve_shmated_memory (
  size_t bytes,
  char* requested_addr,
  size_t alignment_hint) {

  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
    bytes, requested_addr, alignment_hint);

  // Either give me wish address or wish alignment but not both.
  assert0(!(requested_addr != NULL && alignment_hint != 0));

  // We must prevent anyone from attaching too close to the
  // BRK because that may cause malloc OOM.
  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
      "Will attach anywhere.", requested_addr);
    // Act like the OS refused to attach there.
    requested_addr = NULL;
  }

  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
  if (os::Aix::on_pase_V5R4_or_older()) {
    ShouldNotReachHere();
  }

  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
  const size_t size = align_size_up(bytes, SIZE_64K);

  // Reserve the shared segment.
  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  if (shmid == -1) {
    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
    return NULL;
  }

  // Important note:
  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  // We must right after attaching it remove it from the system. System V shm segments are global and
  // survive the process.
  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).

  // Request 64K pages for the segment; failure to do so is tolerated (the
  // system then falls back to its default page size).
  struct shmid_ds shmbuf;
  memset(&shmbuf, 0, sizeof(shmbuf));
  shmbuf.shm_pagesize = SIZE_64K;
  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
               size / SIZE_64K, errno);
    // I want to know if this ever happens.
    assert(false, "failed to set page size for shmat");
  }

  // Now attach the shared segment.
  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
  // were not a segment boundary.
  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
  const int errno_shmat = errno;

  // (A) Right after shmat and before handing shmat errors delete the shm segment.
  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
    assert(false, "failed to remove shared memory segment!");
  }

  // Handle shmat error. If we failed to attach, just return.
  if (addr == (char*)-1) {
    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
    return NULL;
  }

  // Just for info: query the real page size. In case setting the page size did not
  // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
  const size_t real_pagesize = os::Aix::query_pagesize(addr);
  if (real_pagesize != shmbuf.shm_pagesize) {
    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
  }

  // NOTE(review): after the (char*)-1 check above, addr is always non-NULL
  // here, so the else branch below looks unreachable - kept as-is.
  if (addr) {
    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
  } else {
    if (requested_addr != NULL) {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
    } else {
      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
    }
  }

  // book-keeping
  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
  assert0(is_aligned_to(addr, os::vm_page_size()));

  return addr;
}
2105
2106static bool release_shmated_memory(char* addr, size_t size) {
2107
2108  trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2109    addr, addr + size - 1);
2110
2111  bool rc = false;
2112
2113  // TODO: is there a way to verify shm size without doing bookkeeping?
2114  if (::shmdt(addr) != 0) {
2115    trcVerbose("error (%d).", errno);
2116  } else {
2117    trcVerbose("ok.");
2118    rc = true;
2119  }
2120  return rc;
2121}
2122
2123static bool uncommit_shmated_memory(char* addr, size_t size) {
2124  trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2125    addr, addr + size - 1);
2126
2127  const bool rc = my_disclaim64(addr, size);
2128
2129  if (!rc) {
2130    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2131    return false;
2132  }
2133  return true;
2134}
2135
2136////////////////////////////////  mmap-based routines /////////////////////////////////
2137
2138// Reserve memory via mmap.
2139// If <requested_addr> is given, an attempt is made to attach at the given address.
2140// Failing that, memory is allocated at any address.
2141// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2142// allocate at an address aligned with the given alignment. Failing that, memory
2143// is aligned anywhere.
2144static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2145  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2146    "alignment_hint " UINTX_FORMAT "...",
2147    bytes, requested_addr, alignment_hint);
2148
2149  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2150  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2151    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2152    return NULL;
2153  }
2154
2155  // We must prevent anyone from attaching too close to the
2156  // BRK because that may cause malloc OOM.
2157  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2158    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2159      "Will attach anywhere.", requested_addr);
2160    // Act like the OS refused to attach there.
2161    requested_addr = NULL;
2162  }
2163
2164  // Specify one or the other but not both.
2165  assert0(!(requested_addr != NULL && alignment_hint > 0));
2166
2167  // In 64K mode, we claim the global page size (os::vm_page_size())
2168  // is 64K. This is one of the few points where that illusion may
2169  // break, because mmap() will always return memory aligned to 4K. So
2170  // we must ensure we only ever return memory aligned to 64k.
2171  if (alignment_hint) {
2172    alignment_hint = lcm(alignment_hint, os::vm_page_size());
2173  } else {
2174    alignment_hint = os::vm_page_size();
2175  }
2176
2177  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2178  const size_t size = align_size_up(bytes, os::vm_page_size());
2179
2180  // alignment: Allocate memory large enough to include an aligned range of the right size and
2181  // cut off the leading and trailing waste pages.
2182  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2183  const size_t extra_size = size + alignment_hint;
2184
2185  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2186  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2187  int flags = MAP_ANONYMOUS | MAP_SHARED;
2188
2189  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2190  // it means if wishaddress is given but MAP_FIXED is not set.
2191  //
2192  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2193  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2194  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2195  // get clobbered.
2196  if (requested_addr != NULL) {
2197    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2198      flags |= MAP_FIXED;
2199    }
2200  }
2201
2202  char* addr = (char*)::mmap(requested_addr, extra_size,
2203      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2204
2205  if (addr == MAP_FAILED) {
2206    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2207    return NULL;
2208  }
2209
2210  // Handle alignment.
2211  char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2212  const size_t waste_pre = addr_aligned - addr;
2213  char* const addr_aligned_end = addr_aligned + size;
2214  const size_t waste_post = extra_size - waste_pre - size;
2215  if (waste_pre > 0) {
2216    ::munmap(addr, waste_pre);
2217  }
2218  if (waste_post > 0) {
2219    ::munmap(addr_aligned_end, waste_post);
2220  }
2221  addr = addr_aligned;
2222
2223  if (addr) {
2224    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2225      addr, addr + bytes, bytes);
2226  } else {
2227    if (requested_addr != NULL) {
2228      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2229    } else {
2230      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2231    }
2232  }
2233
2234  // bookkeeping
2235  vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2236
2237  // Test alignment, see above.
2238  assert0(is_aligned_to(addr, os::vm_page_size()));
2239
2240  return addr;
2241}
2242
2243static bool release_mmaped_memory(char* addr, size_t size) {
2244  assert0(is_aligned_to(addr, os::vm_page_size()));
2245  assert0(is_aligned_to(size, os::vm_page_size()));
2246
2247  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2248    addr, addr + size - 1);
2249  bool rc = false;
2250
2251  if (::munmap(addr, size) != 0) {
2252    trcVerbose("failed (%d)\n", errno);
2253    rc = false;
2254  } else {
2255    trcVerbose("ok.");
2256    rc = true;
2257  }
2258
2259  return rc;
2260}
2261
2262static bool uncommit_mmaped_memory(char* addr, size_t size) {
2263
2264  assert0(is_aligned_to(addr, os::vm_page_size()));
2265  assert0(is_aligned_to(size, os::vm_page_size()));
2266
2267  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2268    addr, addr + size - 1);
2269  bool rc = false;
2270
2271  // Uncommit mmap memory with msync MS_INVALIDATE.
2272  if (::msync(addr, size, MS_INVALIDATE) != 0) {
2273    trcVerbose("failed (%d)\n", errno);
2274    rc = false;
2275  } else {
2276    trcVerbose("ok.");
2277    rc = true;
2278  }
2279
2280  return rc;
2281}
2282
2283int os::vm_page_size() {
2284  // Seems redundant as all get out.
2285  assert(os::Aix::page_size() != -1, "must call os::init");
2286  return os::Aix::page_size();
2287}
2288
2289// Aix allocates memory by pages.
2290int os::vm_allocation_granularity() {
2291  assert(os::Aix::page_size() != -1, "must call os::init");
2292  return os::Aix::page_size();
2293}
2294
#ifdef PRODUCT
// Product-build-only helper: print diagnostic info when commit_memory fails,
// before vm_exit_out_of_memory() terminates the VM (see PRODUCT_ONLY call
// site in pd_commit_memory_or_exit).
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          strerror(err), err);
}
#endif
2303
2304void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2305                                  const char* mesg) {
2306  assert(mesg != NULL, "mesg must be specified");
2307  if (!pd_commit_memory(addr, size, exec)) {
2308    // Add extra info in product mode for vm_exit_out_of_memory():
2309    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2310    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2311  }
2312}
2313
// Commit a page-aligned sub range of a previously reserved block. AIX commits
// memory lazily on first touch, so the only real work (optional, guarded by
// UseExplicitCommit) is touching every page. Always returns true.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {

  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  // The range must lie inside a block we reserved and track.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);

  if (UseExplicitCommit) {
    // AIX commits memory on touch. So, touch all pages to be committed.
    // (4K stride is sufficient even in 64K mode: it touches every page.)
    for (char* p = addr; p < (addr + size); p += SIZE_4K) {
      *p = '\0';
    }
  }

  return true;
}
2338
2339bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2340  return pd_commit_memory(addr, size, exec);
2341}
2342
2343void os::pd_commit_memory_or_exit(char* addr, size_t size,
2344                                  size_t alignment_hint, bool exec,
2345                                  const char* mesg) {
2346  // Alignment_hint is ignored on this OS.
2347  pd_commit_memory_or_exit(addr, size, exec, mesg);
2348}
2349
2350bool os::pd_uncommit_memory(char* addr, size_t size) {
2351  assert(is_aligned_to(addr, os::vm_page_size()),
2352    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2353    p2i(addr), os::vm_page_size());
2354  assert(is_aligned_to(size, os::vm_page_size()),
2355    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2356    size, os::vm_page_size());
2357
2358  // Dynamically do different things for mmap/shmat.
2359  const vmembk_t* const vmi = vmembk_find(addr);
2360  guarantee0(vmi);
2361  vmi->assert_is_valid_subrange(addr, size);
2362
2363  if (vmi->type == VMEM_SHMATED) {
2364    return uncommit_shmated_memory(addr, size);
2365  } else {
2366    return uncommit_mmaped_memory(addr, size);
2367  }
2368}
2369
2370bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2371  // Do not call this; no need to commit stack pages on AIX.
2372  ShouldNotReachHere();
2373  return true;
2374}
2375
2376bool os::remove_stack_guard_pages(char* addr, size_t size) {
2377  // Do not call this; no need to commit stack pages on AIX.
2378  ShouldNotReachHere();
2379  return true;
2380}
2381
2382void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2383}
2384
2385void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2386}
2387
2388void os::numa_make_global(char *addr, size_t bytes) {
2389}
2390
2391void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2392}
2393
2394bool os::numa_topology_changed() {
2395  return false;
2396}
2397
2398size_t os::numa_get_groups_num() {
2399  return 1;
2400}
2401
2402int os::numa_get_group_id() {
2403  return 0;
2404}
2405
2406size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2407  if (size > 0) {
2408    ids[0] = 0;
2409    return 1;
2410  }
2411  return 0;
2412}
2413
2414bool os::get_page_info(char *start, page_info* info) {
2415  return false;
2416}
2417
2418char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2419  return end;
2420}
2421
2422// Reserves and attaches a shared memory segment.
2423// Will assert if a wish address is given and could not be obtained.
2424char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2425
2426  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2427  // thereby clobbering old mappings at that place. That is probably
2428  // not intended, never used and almost certainly an error were it
2429  // ever be used this way (to try attaching at a specified address
2430  // without clobbering old mappings an alternate API exists,
2431  // os::attempt_reserve_memory_at()).
2432  // Instead of mimicking the dangerous coding of the other platforms, here I
2433  // just ignore the request address (release) or assert(debug).
2434  assert0(requested_addr == NULL);
2435
2436  // Always round to os::vm_page_size(), which may be larger than 4K.
2437  bytes = align_size_up(bytes, os::vm_page_size());
2438  const size_t alignment_hint0 =
2439    alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2440
2441  // In 4K mode always use mmap.
2442  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2443  if (os::vm_page_size() == SIZE_4K) {
2444    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2445  } else {
2446    if (bytes >= Use64KPagesThreshold) {
2447      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2448    } else {
2449      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2450    }
2451  }
2452}
2453
// Release (or, for partial shmat ranges, merely uncommit) a tracked range.
// The bookkeeping record is removed only when the whole range goes away.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_size_up(size, os::vm_page_size());
  addr = (char *)align_ptr_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still page
    //   table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    // mmap ranges are always fully unmapped and untracked, even partially.
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2495
2496static bool checked_mprotect(char* addr, size_t size, int prot) {
2497
2498  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2499  // not tell me if protection failed when trying to protect an un-protectable range.
2500  //
2501  // This means if the memory was allocated using shmget/shmat, protection wont work
2502  // but mprotect will still return 0:
2503  //
2504  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2505
2506  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2507
2508  if (!rc) {
2509    const char* const s_errno = strerror(errno);
2510    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2511    return false;
2512  }
2513
2514  // mprotect success check
2515  //
2516  // Mprotect said it changed the protection but can I believe it?
2517  //
2518  // To be sure I need to check the protection afterwards. Try to
2519  // read from protected memory and check whether that causes a segfault.
2520  //
2521  if (!os::Aix::xpg_sus_mode()) {
2522
2523    if (CanUseSafeFetch32()) {
2524
2525      const bool read_protected =
2526        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2527         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2528
2529      if (prot & PROT_READ) {
2530        rc = !read_protected;
2531      } else {
2532        rc = read_protected;
2533      }
2534
2535      if (!rc) {
2536        if (os::Aix::on_pase()) {
2537          // There is an issue on older PASE systems where mprotect() will return success but the
2538          // memory will not be protected.
2539          // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2540          // machines; we only see it rarely, when using mprotect() to protect the guard page of
2541          // a stack. It is an OS error.
2542          //
2543          // A valid strategy is just to try again. This usually works. :-/
2544
2545          ::usleep(1000);
2546          if (::mprotect(addr, size, prot) == 0) {
2547            const bool read_protected_2 =
2548              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2549              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2550            rc = true;
2551          }
2552        }
2553      }
2554    }
2555  }
2556
2557  assert(rc == true, "mprotect failed.");
2558
2559  return rc;
2560}
2561
2562// Set protections specified
2563bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2564  unsigned int p = 0;
2565  switch (prot) {
2566  case MEM_PROT_NONE: p = PROT_NONE; break;
2567  case MEM_PROT_READ: p = PROT_READ; break;
2568  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2569  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2570  default:
2571    ShouldNotReachHere();
2572  }
2573  // is_committed is unused.
2574  return checked_mprotect(addr, size, p);
2575}
2576
2577bool os::guard_memory(char* addr, size_t size) {
2578  return checked_mprotect(addr, size, PROT_NONE);
2579}
2580
2581bool os::unguard_memory(char* addr, size_t size) {
2582  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2583}
2584
// Large page support

// Stays 0 on AIX: large pages are realized as 64K pages through the normal
// reservation paths, not through a dedicated huge-page mechanism.
static size_t _large_page_size = 0;
2588
2589// Enable large page support if OS allows that.
2590void os::large_page_init() {
2591  return; // Nothing to do. See query_multipage_support and friends.
2592}
2593
// Unsupported entry point on AIX; always asserts and returns NULL.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}
2601
// Counterpart to reserve_memory_special(). Since that entry point is
// unsupported on AIX (see above), this should never be reached either.
bool os::release_memory_special(char* base, size_t bytes) {
  Unimplemented();
  return false;
}
2607
// Returns _large_page_size, which is initialized to 0 and not updated
// by the large-page code in this file (large pages are unsupported here).
size_t os::large_page_size() {
  return _large_page_size;
}
2611
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2616
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2621
2622// Reserve memory at an arbitrary address, only if that area is
2623// available (and not reserved for something else).
2624char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2625  char* addr = NULL;
2626
2627  // Always round to os::vm_page_size(), which may be larger than 4K.
2628  bytes = align_size_up(bytes, os::vm_page_size());
2629
2630  // In 4K mode always use mmap.
2631  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2632  if (os::vm_page_size() == SIZE_4K) {
2633    return reserve_mmaped_memory(bytes, requested_addr, 0);
2634  } else {
2635    if (bytes >= Use64KPagesThreshold) {
2636      return reserve_shmated_memory(bytes, requested_addr, 0);
2637    } else {
2638      return reserve_mmaped_memory(bytes, requested_addr, 0);
2639    }
2640  }
2641
2642  return addr;
2643}
2644
// Read up to nBytes from fd into buf. Thin wrapper around read(2);
// note ::read returns ssize_t, which is implicitly converted to size_t.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}
2648
// Positioned read: like os::read() but reads at the given file offset
// without moving the file position (wraps pread(2)).
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}
2652
2653void os::naked_short_sleep(jlong ms) {
2654  struct timespec req;
2655
2656  assert(ms < 1000, "Un-interruptable sleep, short time use only");
2657  req.tv_sec = 0;
2658  if (ms > 0) {
2659    req.tv_nsec = (ms % 1000) * 1000000;
2660  }
2661  else {
2662    req.tv_nsec = 1;
2663  }
2664
2665  nanosleep(&req, NULL);
2666
2667  return;
2668}
2669
2670// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2671void os::infinite_sleep() {
2672  while (true) {    // sleep forever ...
2673    ::sleep(100);   // ... 100 seconds at a time
2674  }
2675}
2676
// Used to convert frequent JVM_Yield() to nops
// Controlled entirely by the -XX:+DontYieldALot flag.
bool os::dont_yield() {
  return DontYieldALot;
}
2681
// Give up the remainder of this thread's time slice without blocking.
void os::naked_yield() {
  sched_yield();
}
2685
2686////////////////////////////////////////////////////////////////////////////////
2687// thread priority support
2688
2689// From AIX manpage to pthread_setschedparam
2690// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2691//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2692//
2693// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2694// range from 40 to 80, where 40 is the least favored priority and 80
2695// is the most favored."
2696//
2697// (Actually, I doubt this even has an impact on AIX, as we do kernel
2698// scheduling there; however, this still leaves iSeries.)
2699//
2700// We use the same values for AIX and PASE.
// Table mapping Java priorities (index 0..CriticalPriority) to
// SCHED_OTHER priorities; all values lie in the valid [40, 80] range
// described above. Used by set_native_priority()/get_native_priority().
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2720
2721OSReturn os::set_native_priority(Thread* thread, int newpri) {
2722  if (!UseThreadPriorities) return OS_OK;
2723  pthread_t thr = thread->osthread()->pthread_id();
2724  int policy = SCHED_OTHER;
2725  struct sched_param param;
2726  param.sched_priority = newpri;
2727  int ret = pthread_setschedparam(thr, policy, &param);
2728
2729  if (ret != 0) {
2730    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2731        (int)thr, newpri, ret, strerror(ret));
2732  }
2733  return (ret == 0) ? OS_OK : OS_ERR;
2734}
2735
2736OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2737  if (!UseThreadPriorities) {
2738    *priority_ptr = java_to_os_priority[NormPriority];
2739    return OS_OK;
2740  }
2741  pthread_t thr = thread->osthread()->pthread_id();
2742  int policy = SCHED_OTHER;
2743  struct sched_param param;
2744  int ret = pthread_getschedparam(thr, &policy, &param);
2745  *priority_ptr = param.sched_priority;
2746
2747  return (ret == 0) ? OS_OK : OS_ERR;
2748}
2749
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}  // No-op on AIX.
2753
2754////////////////////////////////////////////////////////////////////////////////
2755// suspend/resume support
2756
2757//  the low-level signal-based suspend/resume support is a remnant from the
2758//  old VM-suspension that used to be for java-suspension, safepoints etc,
2759//  within hotspot. Now there is a single use-case for this:
2760//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2761//      that runs in the watcher thread.
2762//  The remaining code is greatly simplified from the more general suspension
2763//  code that used to be used.
2764//
2765//  The protocol is quite simple:
2766//  - suspend:
2767//      - sends a signal to the target thread
2768//      - polls the suspend state of the osthread using a yield loop
2769//      - target thread signal handler (SR_handler) sets suspend state
2770//        and blocks in sigsuspend until continued
2771//  - resume:
2772//      - sets target osthread state to continue
2773//      - sends signal to end the sigsuspend loop in the SR_handler
2774//
2775//  Note that the SR_lock plays no role in this suspend/resume protocol.
2776//
2777
2778static void resume_clear_context(OSThread *osthread) {
2779  osthread->set_ucontext(NULL);
2780  osthread->set_siginfo(NULL);
2781}
2782
2783static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2784  osthread->set_ucontext(context);
2785  osthread->set_siginfo(siginfo);
2786}
2787
2788//
2789// Handler function invoked when a thread's execution is suspended or
2790// resumed. We have to be careful that only async-safe functions are
2791// called here (Note: most pthread functions are not async safe and
2792// should be avoided.)
2793//
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2799//
2800// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2801//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    // A suspend was requested: record siginfo/context so the requester
    // can inspect this thread's state while it is suspended.
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // Only leave the loop once the state machine has actually moved
        // back to running; any other wakeup keeps us suspended.
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2851
// Install SR_handler for the suspend/resume signal (SR_signum, overridable
// via the _JAVA_SR_SIGNUM environment variable) and initialize SR_sigset.
// Returns 0 on success, -1 if sigaction() failed.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  // Note: SR_handler is installed through sa_handler with a cast even
  // though SA_SIGINFO is set; sa_handler/sa_sigaction share storage here.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2888
// Nothing to tear down for the suspend/resume support; always succeeds.
static int SR_finalize() {
  return 0;
}
2892
2893static int sr_notify(OSThread* osthread) {
2894  int status = pthread_kill(osthread->pthread_id(), SR_signum);
2895  assert_status(status == 0, status, "pthread_kill");
2896  return status;
2897}
2898
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// (outer/inner spin bounds used by do_suspend() and do_resume()).
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
2904
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  // Outer loop counts toward the RANDOMLY_LARGE_INTEGER timeout; the
  // inner loop yields in short bursts while polling the suspend state.
  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2957
// Counterpart to do_suspend(): flips the state machine to WAKEUP_REQUEST
// and re-signals the target until it reports running again.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  // Re-send the signal each time the yield loops run out, in case the
  // first one was consumed before the target reached sigsuspend().
  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2981
2982///////////////////////////////////////////////////////////////////////////////////
2983// signal handling (except suspend/resume)
2984
2985// This routine may be used by user applications as a "hook" to catch signals.
2986// The user-defined signal handler must pass unrecognized signals to this
2987// routine, and if it returns true (non-zero), then the signal handler must
2988// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine which kills the process.
2991//
2992// If this routine returns false, it is OK to call it again. This allows
2993// the user-defined signal handler to perform checks either before or after
2994// the VM performs its own checks. Naturally, the user code would be making
2995// a serious error if it tried to handle an exception (such as a null check
2996// or breakpoint) that the VM was generating for its own correct operation.
2997//
2998// This routine may recognize any of the following kinds of signals:
2999//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
3000// It should be consulted by handlers for any of those signals.
3001//
3002// The caller of this routine must pass in the three arguments supplied
3003// to the function referred to in the "sa_sigaction" (not the "sa_handler")
3004// field of the structure passed to sigaction(). This routine assumes that
3005// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3006//
3007// Note that the VM will print warnings if it detects conflicting signal
3008// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3009//
3010extern "C" JNIEXPORT int
3011JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
3012
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same.)
// Apply 'set' to the calling thread's signal mask per 'how'
// (SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK); the previous mask is stored in
// *oset when oset is non-NULL. Returns true on success.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // return value semantics differ slightly for error case:
  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
  // (so, pthread_sigmask is more threadsafe for error handling)
  // But success is always 0.
  return rc == 0;
}
3024
3025// Function to unblock all signals which are, according
3026// to POSIX, typical program error signals. If they happen while being blocked,
3027// they typically will bring down the process immediately.
3028bool unblock_program_error_signals() {
3029  sigset_t set;
3030  ::sigemptyset(&set);
3031  ::sigaddset(&set, SIGILL);
3032  ::sigaddset(&set, SIGBUS);
3033  ::sigaddset(&set, SIGFPE);
3034  ::sigaddset(&set, SIGSEGV);
3035  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3036}
3037
// Renamed from 'signalHandler' to avoid collision with other shared libs.
// The VM's main signal entry point: unblocks program-error signals and
// delegates to JVM_handle_aix_signal with abort_if_unrecognized=true.
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_aix_signal(sig, info, uc, true);
  errno = orig_errno;
}
3051
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// Pre-VM handlers saved by save_preinstalled_handler(); an entry in
// sigact[] is valid only if its signal is a member of 'sigs'.
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
// Resolved from libjsig (JVM_get_signal_action) when it is loaded.
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
3062
3063struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3064  struct sigaction *actp = NULL;
3065
3066  if (libjsig_is_loaded) {
3067    // Retrieve the old signal handler from libjsig
3068    actp = (*get_signal_action)(sig);
3069  }
3070  if (actp == NULL) {
3071    // Retrieve the preinstalled signal handler from jvm
3072    actp = get_preinstalled_handler(sig);
3073  }
3074
3075  return actp;
3076}
3077
3078static bool call_chained_handler(struct sigaction *actp, int sig,
3079                                 siginfo_t *siginfo, void *context) {
3080  // Call the old signal handler
3081  if (actp->sa_handler == SIG_DFL) {
3082    // It's more reasonable to let jvm treat it as an unexpected exception
3083    // instead of taking the default action.
3084    return false;
3085  } else if (actp->sa_handler != SIG_IGN) {
3086    if ((actp->sa_flags & SA_NODEFER) == 0) {
3087      // automaticlly block the signal
3088      sigaddset(&(actp->sa_mask), sig);
3089    }
3090
3091    sa_handler_t hand = NULL;
3092    sa_sigaction_t sa = NULL;
3093    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3094    // retrieve the chained handler
3095    if (siginfo_flag_set) {
3096      sa = actp->sa_sigaction;
3097    } else {
3098      hand = actp->sa_handler;
3099    }
3100
3101    if ((actp->sa_flags & SA_RESETHAND) != 0) {
3102      actp->sa_handler = SIG_DFL;
3103    }
3104
3105    // try to honor the signal mask
3106    sigset_t oset;
3107    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3108
3109    // call into the chained handler
3110    if (siginfo_flag_set) {
3111      (*sa)(sig, siginfo, context);
3112    } else {
3113      (*hand)(sig);
3114    }
3115
3116    // restore the signal mask
3117    pthread_sigmask(SIG_SETMASK, &oset, 0);
3118  }
3119  // Tell jvm's signal handler the signal is taken care of.
3120  return true;
3121}
3122
3123bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3124  bool chained = false;
3125  // signal-chaining
3126  if (UseSignalChaining) {
3127    struct sigaction *actp = get_chained_signal_action(sig);
3128    if (actp != NULL) {
3129      chained = call_chained_handler(actp, sig, siginfo, context);
3130    }
3131  }
3132  return chained;
3133}
3134
3135struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3136  if (sigismember(&sigs, sig)) {
3137    return &sigact[sig];
3138  }
3139  return NULL;
3140}
3141
3142void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3143  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3144  sigact[sig] = oldAct;
3145  sigaddset(&sigs, sig);
3146}
3147
// for diagnostic
// sa_flags the VM installed per signal, recorded by set_signal_handler()
// and set_our_sigflags(); compared against the live flags in
// check_signal_handler() and print_signal_handler().
int sigflags[NSIG];
3150
// Return the sa_flags the VM recorded for this signal (diagnostics).
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}
3155
// Record the sa_flags the VM installed for this signal (diagnostics).
void os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  // Range check kept for product builds where the assert compiles away.
  if (sig > 0 && sig < NSIG) {
    sigflags[sig] = flags;
  }
}
3162
// Install (set_installed == true) or restore the default (false) handler
// for 'sig', honoring AllowUserSignalHandlers and UseSignalChaining when
// a foreign handler is already in place.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on its own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read the handler that was replaced; it must still be the one we
  // saw above, otherwise someone installed a handler concurrently.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3209
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
// Idempotent: only the first call has any effect. Brackets the handler
// installation with libjsig's begin/end callbacks when libjsig is loaded.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      // Presence of JVM_begin_signal_setting means libjsig is preloaded.
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);
    set_signal_handler(SIGDANGER, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3267
3268static const char* get_signal_handler_name(address handler,
3269                                           char* buf, int buflen) {
3270  int offset;
3271  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3272  if (found) {
3273    // skip directory names
3274    const char *p1, *p2;
3275    p1 = buf;
3276    size_t len = strlen(os::file_separator());
3277    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3278    // The way os::dll_address_to_library_name is implemented on Aix
3279    // right now, it always returns -1 for the offset which is not
3280    // terribly informative.
3281    // Will fix that. For now, omit the offset.
3282    jio_snprintf(buf, buflen, "%s", p1);
3283  } else {
3284    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3285  }
3286  return buf;
3287}
3288
// Print a one-line description of the handler currently installed for
// 'sig' to st: handler name, sa_mask, sa_flags, plus a warning if our
// own handler's flags have been altered.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Perhaps the handler was reset by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Warn if the recorded flags no longer match the installed ones.
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3335
// Run check_signal_handler(sig) unless that signal has already been
// flagged (and reported) in check_signal_done.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3339
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here
// Verifies that no foreign code has replaced the VM's signal handlers;
// disabled entirely when check_signals is false (see install_signal_handlers).

void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  // SIGTRAP is only claimed by the VM when UseSIGTRAP is on.
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }
  DO_SIGNAL_CHECK(SIGDANGER);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  // The suspend/resume signal must also still point at SR_handler.
  DO_SIGNAL_CHECK(SR_signum);
}
3373
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// The real (non-interposed) sigaction, resolved lazily via dlsym in
// check_signal_handler().
static os_sigaction_t os_sigaction = NULL;
3377
// Compare the handler currently installed for 'sig' with the one the VM
// expects; print a warning (once per signal) and dump all handlers if
// they differ, or if our handler's sa_flags were altered.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3449
// To install functions for atexit system call
// C linkage wrapper so perfMemory_exit() can be registered with atexit().
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3456
// This is called _before_ the most of global arguments have been parsed.
// Early platform initialization: records the process break, determines
// AIX vs. PASE and the OS level, scans the environment, decides the VM
// page size (4K vs. 64K) from the multipage probing results, and sets up
// basic system information.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == SIZE_4K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = SIZE_64K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = SIZE_4K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = SIZE_4K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = SIZE_64K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // Short-wire stack page size to base page size; if that works, we just remove
  // that stack page size altogether.
  Aix::_stack_page_size = Aix::_page_size;

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  // Seed the VM-internal random number generator.
  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();

  // If the pagesize of the VM is greater than 8K determine the appropriate
  // number of initial guard pages. The user can change this with the
  // command line arguments, if needed.
  if (vm_page_size() > (int)Aix::vm_default_page_size()) {
    StackYellowPages = 1;
    StackRedPages = 1;
    StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
  }
}
3583
3584// This is called _after_ the global arguments have been parsed.
3585jint os::init_2(void) {
3586
3587  if (os::Aix::on_pase()) {
3588    trcVerbose("Running on PASE.");
3589  } else {
3590    trcVerbose("Running on AIX (not PASE).");
3591  }
3592
3593  trcVerbose("processor count: %d", os::_processor_count);
3594  trcVerbose("physical memory: %lu", Aix::_physical_memory);
3595
3596  // Initially build up the loaded dll map.
3597  LoadedLibraries::reload();
3598  if (Verbose) {
3599    trcVerbose("Loaded Libraries: ");
3600    LoadedLibraries::print(tty);
3601  }
3602
3603  const int page_size = Aix::page_size();
3604  const int map_size = page_size;
3605
3606  address map_address = (address) MAP_FAILED;
3607  const int prot  = PROT_READ;
3608  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3609
3610  // Use optimized addresses for the polling page,
3611  // e.g. map it to a special 32-bit address.
3612  if (OptimizePollingPageLocation) {
3613    // architecture-specific list of address wishes:
3614    address address_wishes[] = {
3615      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3616      // PPC64: all address wishes are non-negative 32 bit values where
3617      // the lower 16 bits are all zero. we can load these addresses
3618      // with a single ppc_lis instruction.
3619      (address) 0x30000000, (address) 0x31000000,
3620      (address) 0x32000000, (address) 0x33000000,
3621      (address) 0x40000000, (address) 0x41000000,
3622      (address) 0x42000000, (address) 0x43000000,
3623      (address) 0x50000000, (address) 0x51000000,
3624      (address) 0x52000000, (address) 0x53000000,
3625      (address) 0x60000000, (address) 0x61000000,
3626      (address) 0x62000000, (address) 0x63000000
3627    };
3628    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3629
3630    // iterate over the list of address wishes:
3631    for (int i=0; i<address_wishes_length; i++) {
3632      // Try to map with current address wish.
3633      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3634      // fail if the address is already mapped.
3635      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3636                                     map_size, prot,
3637                                     flags | MAP_FIXED,
3638                                     -1, 0);
3639      trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3640                   address_wishes[i], map_address + (ssize_t)page_size);
3641
3642      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3643        // Map succeeded and map_address is at wished address, exit loop.
3644        break;
3645      }
3646
3647      if (map_address != (address) MAP_FAILED) {
3648        // Map succeeded, but polling_page is not at wished address, unmap and continue.
3649        ::munmap(map_address, map_size);
3650        map_address = (address) MAP_FAILED;
3651      }
3652      // Map failed, continue loop.
3653    }
3654  } // end OptimizePollingPageLocation
3655
3656  if (map_address == (address) MAP_FAILED) {
3657    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3658  }
3659  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3660  os::set_polling_page(map_address);
3661
3662  if (!UseMembar) {
3663    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3664    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3665    os::set_memory_serialize_page(mem_serialize_page);
3666
3667    trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3668        mem_serialize_page, mem_serialize_page + Aix::page_size(),
3669        Aix::page_size(), Aix::page_size());
3670  }
3671
3672  // initialize suspend/resume support - must do this before signal_sets_init()
3673  if (SR_initialize() != 0) {
3674    perror("SR_initialize failed");
3675    return JNI_ERR;
3676  }
3677
3678  Aix::signal_sets_init();
3679  Aix::install_signal_handlers();
3680
3681  // Check minimum allowable stack size for thread creation and to initialize
3682  // the java system classes, including StackOverflowError - depends on page
3683  // size. Add a page for compiler2 recursion in main thread.
3684  // Add in 2*BytesPerWord times page size to account for VM stack during
3685  // class initialization depending on 32 or 64 bit VM.
3686  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3687            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3688                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3689
3690  os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3691
3692  size_t threadStackSizeInBytes = ThreadStackSize * K;
3693  if (threadStackSizeInBytes != 0 &&
3694      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3695    tty->print_cr("\nThe stack size specified is too small, "
3696                  "Specify at least %dk",
3697                  os::Aix::min_stack_allowed / K);
3698    return JNI_ERR;
3699  }
3700
3701  // Make the stack size a multiple of the page size so that
3702  // the yellow/red zones can be guarded.
3703  // Note that this can be 0, if no default stacksize was set.
3704  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3705
3706  if (UseNUMA) {
3707    UseNUMA = false;
3708    warning("NUMA optimizations are not available on this OS.");
3709  }
3710
3711  if (MaxFDLimit) {
3712    // Set the number of file descriptors to max. print out error
3713    // if getrlimit/setrlimit fails but continue regardless.
3714    struct rlimit nbr_files;
3715    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3716    if (status != 0) {
3717      if (PrintMiscellaneous && (Verbose || WizardMode))
3718        perror("os::init_2 getrlimit failed");
3719    } else {
3720      nbr_files.rlim_cur = nbr_files.rlim_max;
3721      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3722      if (status != 0) {
3723        if (PrintMiscellaneous && (Verbose || WizardMode))
3724          perror("os::init_2 setrlimit failed");
3725      }
3726    }
3727  }
3728
3729  if (PerfAllowAtExitRegistration) {
3730    // Only register atexit functions if PerfAllowAtExitRegistration is set.
3731    // At exit functions can be delayed until process exit time, which
3732    // can be problematic for embedded VM situations. Embedded VMs should
3733    // call DestroyJavaVM() to assure that VM resources are released.
3734
3735    // Note: perfMemory_exit_helper atexit function may be removed in
3736    // the future if the appropriate cleanup code can be added to the
3737    // VM_Exit VMOperation's doit method.
3738    if (atexit(perfMemory_exit_helper) != 0) {
3739      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3740    }
3741  }
3742
3743  return JNI_OK;
3744}
3745
3746// Mark the polling page as unreadable
3747void os::make_polling_page_unreadable(void) {
3748  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3749    fatal("Could not disable polling page");
3750  }
3751};
3752
3753// Mark the polling page as readable
3754void os::make_polling_page_readable(void) {
3755  // Changed according to os_linux.cpp.
3756  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3757    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3758  }
3759};
3760
3761int os::active_processor_count() {
3762  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3763  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3764  return online_cpus;
3765}
3766
3767void os::set_native_thread_name(const char *name) {
3768  // Not yet implemented.
3769  return;
3770}
3771
3772bool os::distribute_processes(uint length, uint* distribution) {
3773  // Not yet implemented.
3774  return false;
3775}
3776
3777bool os::bind_to_processor(uint processor_id) {
3778  // Not yet implemented.
3779  return false;
3780}
3781
3782void os::SuspendedThreadTask::internal_do_task() {
3783  if (do_suspend(_thread->osthread())) {
3784    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3785    do_task(context);
3786    do_resume(_thread->osthread());
3787  }
3788}
3789
// A SuspendedThreadTask which captures the program counter of the target
// thread while it is suspended. Used by os::get_thread_pc() below.
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // Returns the captured PC; must only be called after the task has run.
  ExtendedPC result();
protected:
  // Callback invoked while the target thread is suspended.
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc;  // PC captured by do_task().
};
3799
3800ExtendedPC PcFetcher::result() {
3801  guarantee(is_done(), "task is not done yet.");
3802  return _epc;
3803}
3804
3805void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3806  Thread* thread = context.thread();
3807  OSThread* osthread = thread->osthread();
3808  if (osthread->ucontext() != NULL) {
3809    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
3810  } else {
3811    // NULL context is unexpected, double-check this is the VMThread.
3812    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3813  }
3814}
3815
3816// Suspends the target using the signal mechanism and then grabs the PC before
3817// resuming the target. Used by the flat-profiler only
3818ExtendedPC os::get_thread_pc(Thread* thread) {
3819  // Make sure that it is called by the watcher for the VMThread.
3820  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3821  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3822
3823  PcFetcher fetcher(thread);
3824  fetcher.run();
3825  return fetcher.result();
3826}
3827
3828////////////////////////////////////////////////////////////////////////////////
3829// debug support
3830
3831bool os::find(address addr, outputStream* st) {
3832
3833  st->print(PTR_FORMAT ": ", addr);
3834
3835  loaded_module_t lm;
3836  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3837      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3838    st->print("%s", lm.path);
3839    return true;
3840  }
3841
3842  return false;
3843}
3844
3845////////////////////////////////////////////////////////////////////////////////
3846// misc
3847
// This does not do anything on Aix. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, Thread* thread) {
  // No platform-specific wrapping needed on AIX - just invoke the call.
  f(value, method, args, thread);
}
3856
void os::print_statistics() {
  // No platform-specific statistics to print on AIX.
}
3859
3860bool os::message_box(const char* title, const char* message) {
3861  int i;
3862  fdStream err(defaultStream::error_fd());
3863  for (i = 0; i < 78; i++) err.print_raw("=");
3864  err.cr();
3865  err.print_raw_cr(title);
3866  for (i = 0; i < 78; i++) err.print_raw("-");
3867  err.cr();
3868  err.print_raw_cr(message);
3869  for (i = 0; i < 78; i++) err.print_raw("=");
3870  err.cr();
3871
3872  char buf[16];
3873  // Prevent process from exiting upon "read error" without consuming all CPU
3874  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3875
3876  return buf[0] == 'y' || buf[0] == 'Y';
3877}
3878
3879int os::stat(const char *path, struct stat *sbuf) {
3880  char pathbuf[MAX_PATH];
3881  if (strlen(path) > MAX_PATH - 1) {
3882    errno = ENAMETOOLONG;
3883    return -1;
3884  }
3885  os::native_path(strcpy(pathbuf, path));
3886  return ::stat(pathbuf, sbuf);
3887}
3888
bool os::check_heap(bool force) {
  // No C-heap consistency check is implemented on AIX; always report OK.
  return true;
}
3892
3893// Is a (classpath) directory empty?
3894bool os::dir_is_empty(const char* path) {
3895  DIR *dir = NULL;
3896  struct dirent *ptr;
3897
3898  dir = opendir(path);
3899  if (dir == NULL) return true;
3900
3901  /* Scan the directory */
3902  bool result = true;
3903  char buf[sizeof(struct dirent) + MAX_PATH];
3904  while (result && (ptr = ::readdir(dir)) != NULL) {
3905    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3906      result = false;
3907    }
3908  }
3909  closedir(dir);
3910  return result;
3911}
3912
3913// This code originates from JDK's sysOpen and open64_w
3914// from src/solaris/hpi/src/system_md.c
3915
3916int os::open(const char *path, int oflag, int mode) {
3917
3918  if (strlen(path) > MAX_PATH - 1) {
3919    errno = ENAMETOOLONG;
3920    return -1;
3921  }
3922  int fd;
3923
3924  fd = ::open64(path, oflag, mode);
3925  if (fd == -1) return -1;
3926
3927  // If the open succeeded, the file might still be a directory.
3928  {
3929    struct stat64 buf64;
3930    int ret = ::fstat64(fd, &buf64);
3931    int st_mode = buf64.st_mode;
3932
3933    if (ret != -1) {
3934      if ((st_mode & S_IFMT) == S_IFDIR) {
3935        errno = EISDIR;
3936        ::close(fd);
3937        return -1;
3938      }
3939    } else {
3940      ::close(fd);
3941      return -1;
3942    }
3943  }
3944
3945  // All file descriptors that are opened in the JVM and not
3946  // specifically destined for a subprocess should have the
3947  // close-on-exec flag set. If we don't set it, then careless 3rd
3948  // party native code might fork and exec without closing all
3949  // appropriate file descriptors (e.g. as we do in closeDescriptors in
3950  // UNIXProcess.c), and this in turn might:
3951  //
3952  // - cause end-of-file to fail to be detected on some file
3953  //   descriptors, resulting in mysterious hangs, or
3954  //
3955  // - might cause an fopen in the subprocess to fail on a system
3956  //   suffering from bug 1085341.
3957  //
3958  // (Yes, the default setting of the close-on-exec flag is a Unix
3959  // design flaw.)
3960  //
3961  // See:
3962  // 1085341: 32-bit stdio routines should support file descriptors >255
3963  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3964  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3965#ifdef FD_CLOEXEC
3966  {
3967    int flags = ::fcntl(fd, F_GETFD);
3968    if (flags != -1)
3969      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3970  }
3971#endif
3972
3973  return fd;
3974}
3975
3976// create binary file, rewriting existing file if required
3977int os::create_binary_file(const char* path, bool rewrite_existing) {
3978  int oflags = O_WRONLY | O_CREAT;
3979  if (!rewrite_existing) {
3980    oflags |= O_EXCL;
3981  }
3982  return ::open64(path, oflags, S_IREAD | S_IWRITE);
3983}
3984
// return current position of file pointer
jlong os::current_file_offset(int fd) {
  // lseek64 with offset 0 / SEEK_CUR just queries the current position.
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}
3989
// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  // Returns the resulting absolute offset, or -1 on failure.
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}
3994
3995// This code originates from JDK's sysAvailable
3996// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3997
3998int os::available(int fd, jlong *bytes) {
3999  jlong cur, end;
4000  int mode;
4001  struct stat64 buf64;
4002
4003  if (::fstat64(fd, &buf64) >= 0) {
4004    mode = buf64.st_mode;
4005    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4006      int n;
4007      if (::ioctl(fd, FIONREAD, &n) >= 0) {
4008        *bytes = n;
4009        return 1;
4010      }
4011    }
4012  }
4013  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4014    return 0;
4015  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4016    return 0;
4017  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4018    return 0;
4019  }
4020  *bytes = end - cur;
4021  return 1;
4022}
4023
4024// Map a block of memory.
4025char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4026                        char *addr, size_t bytes, bool read_only,
4027                        bool allow_exec) {
4028  int prot;
4029  int flags = MAP_PRIVATE;
4030
4031  if (read_only) {
4032    prot = PROT_READ;
4033    flags = MAP_SHARED;
4034  } else {
4035    prot = PROT_READ | PROT_WRITE;
4036    flags = MAP_PRIVATE;
4037  }
4038
4039  if (allow_exec) {
4040    prot |= PROT_EXEC;
4041  }
4042
4043  if (addr != NULL) {
4044    flags |= MAP_FIXED;
4045  }
4046
4047  // Allow anonymous mappings if 'fd' is -1.
4048  if (fd == -1) {
4049    flags |= MAP_ANONYMOUS;
4050  }
4051
4052  char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
4053                                     fd, file_offset);
4054  if (mapped_address == MAP_FAILED) {
4055    return NULL;
4056  }
4057  return mapped_address;
4058}
4059
// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS - there is no cheaper remap primitive.
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}
4068
4069// Unmap a block of memory.
4070bool os::pd_unmap_memory(char* addr, size_t bytes) {
4071  return munmap(addr, bytes) == 0;
4072}
4073
4074// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4075// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4076// of a thread.
4077//
4078// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4079// the fast estimate available on the platform.
4080
4081jlong os::current_thread_cpu_time() {
4082  // return user + sys since the cost is the same
4083  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4084  assert(n >= 0, "negative CPU time");
4085  return n;
4086}
4087
4088jlong os::thread_cpu_time(Thread* thread) {
4089  // consistent with what current_thread_cpu_time() returns
4090  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4091  assert(n >= 0, "negative CPU time");
4092  return n;
4093}
4094
4095jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4096  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4097  assert(n >= 0, "negative CPU time");
4098  return n;
4099}
4100
4101static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4102  bool error = false;
4103
4104  jlong sys_time = 0;
4105  jlong user_time = 0;
4106
4107  // Reimplemented using getthrds64().
4108  //
4109  // Works like this:
4110  // For the thread in question, get the kernel thread id. Then get the
4111  // kernel thread statistics using that id.
4112  //
4113  // This only works of course when no pthread scheduling is used,
4114  // i.e. there is a 1:1 relationship to kernel threads.
4115  // On AIX, see AIXTHREAD_SCOPE variable.
4116
4117  pthread_t pthtid = thread->osthread()->pthread_id();
4118
4119  // retrieve kernel thread id for the pthread:
4120  tid64_t tid = 0;
4121  struct __pthrdsinfo pinfo;
4122  // I just love those otherworldly IBM APIs which force me to hand down
4123  // dummy buffers for stuff I dont care for...
4124  char dummy[1];
4125  int dummy_size = sizeof(dummy);
4126  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4127                          dummy, &dummy_size) == 0) {
4128    tid = pinfo.__pi_tid;
4129  } else {
4130    tty->print_cr("pthread_getthrds_np failed.");
4131    error = true;
4132  }
4133
4134  // retrieve kernel timing info for that kernel thread
4135  if (!error) {
4136    struct thrdentry64 thrdentry;
4137    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4138      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4139      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4140    } else {
4141      tty->print_cr("pthread_getthrds_np failed.");
4142      error = true;
4143    }
4144  }
4145
4146  if (p_sys_time) {
4147    *p_sys_time = sys_time;
4148  }
4149
4150  if (p_user_time) {
4151    *p_user_time = user_time;
4152  }
4153
4154  if (error) {
4155    return false;
4156  }
4157
4158  return true;
4159}
4160
4161jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4162  jlong sys_time;
4163  jlong user_time;
4164
4165  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4166    return -1;
4167  }
4168
4169  return user_sys_cpu_time ? sys_time + user_time : user_time;
4170}
4171
// Describes the JVMTI timer characteristics of current_thread_cpu_time():
// a full 64-bit, non-skipping, user+system CPU timer.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4178
// Describes the JVMTI timer characteristics of thread_cpu_time():
// a full 64-bit, non-skipping, user+system CPU timer.
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4185
bool os::is_thread_cpu_time_supported() {
  // Per-thread CPU time is available via getthrds64(), see
  // thread_cpu_time_unchecked() above.
  return true;
}
4189
4190// System loadavg support. Returns -1 if load average cannot be obtained.
4191// For now just return the system wide load average (no processor sets).
4192int os::loadavg(double values[], int nelem) {
4193
4194  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4195  guarantee(values, "argument error");
4196
4197  if (os::Aix::on_pase()) {
4198
4199    // AS/400 PASE: use libo4 porting library
4200    double v[3] = { 0.0, 0.0, 0.0 };
4201
4202    if (libo4::get_load_avg(v, v + 1, v + 2)) {
4203      for (int i = 0; i < nelem; i ++) {
4204        values[i] = v[i];
4205      }
4206      return nelem;
4207    } else {
4208      return -1;
4209    }
4210
4211  } else {
4212
4213    // AIX: use libperfstat
4214    libperfstat::cpuinfo_t ci;
4215    if (libperfstat::get_cpuinfo(&ci)) {
4216      for (int i = 0; i < nelem; i++) {
4217        values[i] = ci.loadavg[i];
4218      }
4219    } else {
4220      return -1;
4221    }
4222    return nelem;
4223  }
4224}
4225
4226void os::pause() {
4227  char filename[MAX_PATH];
4228  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4229    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4230  } else {
4231    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4232  }
4233
4234  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4235  if (fd != -1) {
4236    struct stat buf;
4237    ::close(fd);
4238    while (::stat(filename, &buf) == 0) {
4239      (void)::poll(NULL, 0, 100);
4240    }
4241  } else {
4242    trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4243  }
4244}
4245
4246bool os::Aix::is_primordial_thread() {
4247  if (pthread_self() == (pthread_t)1) {
4248    return true;
4249  } else {
4250    return false;
4251  }
4252}
4253
4254// OS recognitions (PASE/AIX, OS level) call this before calling any
4255// one of Aix::on_pase(), Aix::os_version() static
4256void os::Aix::initialize_os_info() {
4257
4258  assert(_on_pase == -1 && _os_version == 0, "already called.");
4259
4260  struct utsname uts;
4261  memset(&uts, 0, sizeof(uts));
4262  strcpy(uts.sysname, "?");
4263  if (::uname(&uts) == -1) {
4264    trcVerbose("uname failed (%d)", errno);
4265    guarantee(0, "Could not determine whether we run on AIX or PASE");
4266  } else {
4267    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4268               "node \"%s\" machine \"%s\"\n",
4269               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4270    const int major = atoi(uts.version);
4271    assert(major > 0, "invalid OS version");
4272    const int minor = atoi(uts.release);
4273    assert(minor > 0, "invalid OS release");
4274    _os_version = (major << 24) | (minor << 16);
4275    char ver_str[20] = {0};
4276    char *name_str = "unknown OS";
4277    if (strcmp(uts.sysname, "OS400") == 0) {
4278      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4279      _on_pase = 1;
4280      if (os_version_short() < 0x0504) {
4281        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4282        assert(false, "OS/400 release too old.");
4283      }
4284      name_str = "OS/400 (pase)";
4285      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4286    } else if (strcmp(uts.sysname, "AIX") == 0) {
4287      // We run on AIX. We do not support versions older than AIX 5.3.
4288      _on_pase = 0;
4289      // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4290      odmWrapper::determine_os_kernel_version(&_os_version);
4291      if (os_version_short() < 0x0503) {
4292        trcVerbose("AIX release older than AIX 5.3 not supported.");
4293        assert(false, "AIX release too old.");
4294      }
4295      name_str = "AIX";
4296      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4297                   major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4298    } else {
4299      assert(false, name_str);
4300    }
4301    trcVerbose("We run on %s %s", name_str, ver_str);
4302  }
4303
4304  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4305} // end: os::Aix::initialize_os_info()
4306
4307// Scan environment for important settings which might effect the VM.
4308// Trace out settings. Warn about invalid settings and/or correct them.
4309//
4310// Must run after os::Aix::initialue_os_info().
4311void os::Aix::scan_environment() {
4312
4313  char* p;
4314  int rc;
4315
4316  // Warn explicity if EXTSHM=ON is used. That switch changes how
4317  // System V shared memory behaves. One effect is that page size of
4318  // shared memory cannot be change dynamically, effectivly preventing
4319  // large pages from working.
4320  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4321  // recommendation is (in OSS notes) to switch it off.
4322  p = ::getenv("EXTSHM");
4323  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4324  if (p && strcasecmp(p, "ON") == 0) {
4325    _extshm = 1;
4326    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4327    if (!AllowExtshm) {
4328      // We allow under certain conditions the user to continue. However, we want this
4329      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4330      // that the VM is not able to allocate 64k pages for the heap.
4331      // We do not want to run with reduced performance.
4332      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4333    }
4334  } else {
4335    _extshm = 0;
4336  }
4337
4338  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4339  // Not tested, not supported.
4340  //
4341  // Note that it might be worth the trouble to test and to require it, if only to
4342  // get useful return codes for mprotect.
4343  //
4344  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4345  // exec() ? before loading the libjvm ? ....)
4346  p = ::getenv("XPG_SUS_ENV");
4347  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4348  if (p && strcmp(p, "ON") == 0) {
4349    _xpg_sus_mode = 1;
4350    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4351    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4352    // clobber address ranges. If we ever want to support that, we have to do some
4353    // testing first.
4354    guarantee(false, "XPG_SUS_ENV=ON not supported");
4355  } else {
4356    _xpg_sus_mode = 0;
4357  }
4358
4359  if (os::Aix::on_pase()) {
4360    p = ::getenv("QIBM_MULTI_THREADED");
4361    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4362  }
4363
4364  p = ::getenv("LDR_CNTRL");
4365  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4366  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4367    if (p && ::strstr(p, "TEXTPSIZE")) {
4368      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4369        "you may experience hangs or crashes on OS/400 V7R1.");
4370    }
4371  }
4372
4373  p = ::getenv("AIXTHREAD_GUARDPAGES");
4374  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4375
4376} // end: os::Aix::scan_environment()
4377
4378// PASE: initialize the libo4 library (PASE porting library).
4379void os::Aix::initialize_libo4() {
4380  guarantee(os::Aix::on_pase(), "OS/400 only.");
4381  if (!libo4::init()) {
4382    trcVerbose("libo4 initialization failed.");
4383    assert(false, "libo4 initialization failed");
4384  } else {
4385    trcVerbose("libo4 initialized.");
4386  }
4387}
4388
4389// AIX: initialize the libperfstat library.
4390void os::Aix::initialize_libperfstat() {
4391  assert(os::Aix::on_aix(), "AIX only");
4392  if (!libperfstat::init()) {
4393    trcVerbose("libperfstat initialization failed.");
4394    assert(false, "libperfstat initialization failed");
4395  } else {
4396    trcVerbose("libperfstat initialized.");
4397  }
4398}
4399
4400/////////////////////////////////////////////////////////////////////////////
4401// thread stack
4402
// Function to query the current stack size using pthread_getthrds_np.
// Returns the base (highest usable address) and size of the current thread's
// stack via the out parameters; either pointer may be NULL if the caller is
// not interested in that value. Returns false if the pthread query fails.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, I assert here.
  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");

  // Information about this api can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks. For cases
  // where I create an own stack and pass this to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).

  pthread_t tid = pthread_self();
  struct __pthrdsinfo pinfo;
  char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
  int dummy_size = sizeof(dummy);

  memset(&pinfo, 0, sizeof(pinfo));

  // Query all available thread information for the current thread.
  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
                                     sizeof(pinfo), dummy, &dummy_size);

  if (rc != 0) {
    assert0(false);
    trcVerbose("pthread_getthrds_np failed (%d)", rc);
    return false;
  }
  guarantee0(pinfo.__pi_stackend);

  // The following may happen when invoking pthread_getthrds_np on a pthread
  // running on a user provided stack (when handing down a stack to pthread
  // create, see pthread_attr_setstackaddr).
  // Not sure what to do then.

  guarantee0(pinfo.__pi_stacksize);

  // Note: we get three values from pthread_getthrds_np:
  //       __pi_stackaddr, __pi_stacksize, __pi_stackend
  //
  // high addr    ---------------------
  //
  //    |         pthread internal data, like ~2K
  //    |
  //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |          ---------------------   (__pi_stackend - __pi_stacksize)
  //    |
  //    |          padding to align the following AIX guard pages, if enabled.
  //    |
  //    V          ---------------------   __pi_stackaddr
  //
  // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
  //

  // Report __pi_stackend as the stack base; the usable size is measured down
  // to the page-aligned low end so the (page-granular) AIX guard region and
  // alignment padding are excluded.
  address stack_base = (address)(pinfo.__pi_stackend);
  address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
    os::vm_page_size());
  size_t stack_size = stack_base - stack_low_addr;

  if (p_stack_base) {
    *p_stack_base = stack_base;
  }

  if (p_stack_size) {
    *p_stack_size = stack_size;
  }

  return true;
}
4480
4481// Get the current stack base from the OS (actually, the pthread library).
4482address os::current_stack_base() {
4483  address p;
4484  query_stack_dimensions(&p, 0);
4485  return p;
4486}
4487
4488// Get the current stack size from the OS (actually, the pthread library).
4489size_t os::current_stack_size() {
4490  size_t s;
4491  query_stack_dimensions(0, &s);
4492  return s;
4493}
4494
4495// Refer to the comments in os_solaris.cpp park-unpark.
4496
4497// utility to compute the abstime argument to timedwait:
4498// millis is the relative timeout time
4499// abstime will be the absolute timeout time
4500// TODO: replace compute_abstime() with unpackTime()
4501
4502static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4503  if (millis < 0) millis = 0;
4504  struct timeval now;
4505  int status = gettimeofday(&now, NULL);
4506  assert(status == 0, "gettimeofday");
4507  jlong seconds = millis / 1000;
4508  millis %= 1000;
4509  if (seconds > 50000000) { // see man cond_timedwait(3T)
4510    seconds = 50000000;
4511  }
4512  abstime->tv_sec = now.tv_sec  + seconds;
4513  long       usec = now.tv_usec + millis * 1000;
4514  if (usec >= 1000000) {
4515    abstime->tv_sec += 1;
4516    usec -= 1000000;
4517  }
4518  abstime->tv_nsec = usec * 1000;
4519  return abstime;
4520}
4521
4522// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4523// Conceptually TryPark() should be equivalent to park(0).
4524
4525int os::PlatformEvent::TryPark() {
4526  for (;;) {
4527    const int v = _Event;
4528    guarantee ((v == 0) || (v == 1), "invariant");
4529    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4530  }
4531}
4532
void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  // Atomically decrement _Event; a prior unpark() leaves _Event == 1,
  // in which case we consume the permit and return without blocking.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v == 0) {
    // No permit was available (_Event is now -1).
    // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant");
    ++ _nParked;
    // Wait until unpark() raises _Event back to >= 0; loop guards against
    // spurious wakeups from pthread_cond_wait.
    while (_Event < 0) {
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
    }
    -- _nParked;

    // In theory we could move the ST of 0 into _Event past the unlock(),
    // but then we'd need a MEMBAR after the ST.
    _Event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
  guarantee (_Event >= 0, "invariant");
}
4563
// Timed variant of park(): blocks for at most 'millis' milliseconds.
// Returns OS_OK if the event was set (or a permit was already available),
// OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant");

  // Atomically decrement _Event; consume an available permit if present.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
  // We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;         // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  // _Event >= 0 means unpark() ran before the timeout expired.
  if (_Event >= 0) {
     ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant");
  return ret;
}
4617
// Set the event and wake the associated thread if it is parked.
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;
  // Atomically raise _Event toward 1. If it is already 1 the event is set
  // and there is nothing to do.
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  // v < 0 means a thread is (or is about to be) blocked in park().
  if (v < 0) {
    // Wait for the thread associated with the event to vacate
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // NOTE(review): unlike ports that signal *after* dropping the lock
      // (to avoid futile wakeups), this implementation signals while still
      // holding the mutex — see the comment below. Either order is correct:
      // the waiter re-tests _Event under the lock.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    // Mutex should be locked for pthread_cond_signal(_cond).
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // Spurious wakeups are benign for "immortal" Events: in rare circumstances
  // a thread may return prematurely from cond_{timed}wait(), but the victim
  // simply re-tests the condition and re-parks itself.
}
4656
4657
4658// JSR166
4659// -------------------------------------------------------
4660
4661//
4662// The solaris and linux implementations of park/unpark are fairly
4663// conservative for now, but can be improved. They currently use a
4664// mutex/condvar pair, plus a a count.
4665// Park decrements count if > 0, else does a condvar wait. Unpark
4666// sets count to 1 and signals condvar. Only one thread ever waits
4667// on the condvar. Contention seen when trying to park implies that someone
4668// is unparking you, so don't wait. And spurious returns are fine, so there
4669// is no need to track notifications.
4670//
4671
4672#define MAX_SECS 100000000
4673//
4674// This code is common to linux and solaris and will be moved to a
4675// common place in dolphin.
4676//
4677// The passed in time value is either a relative time in nanoseconds
4678// or an absolute time in milliseconds. Either way it has to be unpacked
4679// into suitable seconds and nanoseconds components and stored in the
4680// given timespec structure.
4681// Given time is a 64-bit value and the time_t used in the timespec is only
4682// a signed-32-bit value (except on 64-bit Linux) we have to watch for
4683// overflow if times way in the future are given. Further on Solaris versions
4684// prior to 10 there is a restriction (see cond_timedwait) that the specified
4685// number of seconds, in abstime, is less than current_time + 100,000,000.
4686// As it will be 28 years before "now + 100000000" will overflow we can
4687// ignore overflow and just impose a hard-limit on seconds using the value
4688// of "now + 100,000,000". This places a limit on the timeout of about 3.17
4689// years from "now".
4690//
4691
4692static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4693  assert (time > 0, "convertTime");
4694
4695  struct timeval now;
4696  int status = gettimeofday(&now, NULL);
4697  assert(status == 0, "gettimeofday");
4698
4699  time_t max_secs = now.tv_sec + MAX_SECS;
4700
4701  if (isAbsolute) {
4702    jlong secs = time / 1000;
4703    if (secs > max_secs) {
4704      absTime->tv_sec = max_secs;
4705    }
4706    else {
4707      absTime->tv_sec = secs;
4708    }
4709    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4710  }
4711  else {
4712    jlong secs = time / NANOSECS_PER_SEC;
4713    if (secs >= MAX_SECS) {
4714      absTime->tv_sec = max_secs;
4715      absTime->tv_nsec = 0;
4716    }
4717    else {
4718      absTime->tv_sec = now.tv_sec + secs;
4719      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4720      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4721        absTime->tv_nsec -= NANOSECS_PER_SEC;
4722        ++absTime->tv_sec; // note: this must be <= max_secs
4723      }
4724    }
4725  }
4726  assert(absTime->tv_sec >= 0, "tv_sec < 0");
4727  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4728  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4729  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4730}
4731
// Park the current JavaThread until unpark() provides a permit, the timeout
// expires, or the thread is interrupted.
// time == 0 with isAbsolute == false means wait indefinitely;
// otherwise 'time' is interpreted by unpackTime() (absolute ms or relative ns).
void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0;
    OrderAccess::fence();
    return;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
    // Fence to ensure the cleared permit is visible before returning.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // Block on the condvar: untimed wait when time == 0, timed wait otherwise.
  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex);
  } else {
    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // Restore the original signal mask.
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  // Consume the permit (if any) and release the leaf lock.
  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}
4818
4819void Parker::unpark() {
4820  int s, status;
4821  status = pthread_mutex_lock(_mutex);
4822  assert (status == 0, "invariant");
4823  s = _counter;
4824  _counter = 1;
4825  if (s < 1) {
4826    status = pthread_mutex_unlock(_mutex);
4827    assert (status == 0, "invariant");
4828    status = pthread_cond_signal (_cond);
4829    assert (status == 0, "invariant");
4830  } else {
4831    pthread_mutex_unlock(_mutex);
4832    assert (status == 0, "invariant");
4833  }
4834}
4835
4836extern char** environ;
4837
4838// Run the specified command in a separate process. Return its exit value,
4839// or -1 on failure (e.g. can't fork a new process).
4840// Unlike system(), this function can be called from signal handler. It
4841// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  // NOTE: string literals decay to char* here (deprecated in C++, but the
  // execve prototype requires char* const argv[]).
  char * argv[4] = {"sh", "-c", cmd, NULL};

  pid_t pid = fork();

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // parent process
    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited. */
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
        case ECHILD: return 0;   // child already reaped elsewhere
        case EINTR: break;       // interrupted by a signal: retry waitpid
        default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
  // Not reached: every branch above returns. Kept to satisfy compilers that
  // cannot prove the control flow.
  return -1;
}
4893
4894// is_headless_jre()
4895//
4896// Test for the existence of xawt/libmawt.so or libawt_xawt.so
4897// in order to report if we are running in a headless jre.
4898//
4899// Since JDK8 xawt/libmawt.so is moved into the same directory
4900// as libawt.so, and renamed libawt_xawt.so
4901bool os::is_headless_jre() {
4902  struct stat statbuf;
4903  char buf[MAXPATHLEN];
4904  char libmawtpath[MAXPATHLEN];
4905  const char *xawtstr = "/xawt/libmawt.so";
4906  const char *new_xawtstr = "/libawt_xawt.so";
4907
4908  char *p;
4909
4910  // Get path to libjvm.so
4911  os::jvm_path(buf, sizeof(buf));
4912
4913  // Get rid of libjvm.so
4914  p = strrchr(buf, '/');
4915  if (p == NULL) return false;
4916  else *p = '\0';
4917
4918  // Get rid of client or server
4919  p = strrchr(buf, '/');
4920  if (p == NULL) return false;
4921  else *p = '\0';
4922
4923  // check xawt/libmawt.so
4924  strcpy(libmawtpath, buf);
4925  strcat(libmawtpath, xawtstr);
4926  if (::stat(libmawtpath, &statbuf) == 0) return false;
4927
4928  // check libawt_xawt.so
4929  strcpy(libmawtpath, buf);
4930  strcat(libmawtpath, new_xawtstr);
4931  if (::stat(libmawtpath, &statbuf) == 0) return false;
4932
4933  return true;
4934}
4935
4936// Get the default path to the core file
4937// Returns the length of the string
4938int os::get_core_path(char* buffer, size_t bufferSize) {
4939  const char* p = get_current_directory(buffer, bufferSize);
4940
4941  if (p == NULL) {
4942    assert(p != NULL, "failed to get current directory");
4943    return 0;
4944  }
4945
4946  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4947                                               p, current_process_id());
4948
4949  return strlen(buffer);
4950}
4951
#ifndef PRODUCT
// Internal-VM-test hook shared across platforms; intentionally empty here.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif
4957
4958bool os::start_debugging(char *buf, int buflen) {
4959  int len = (int)strlen(buf);
4960  char *p = &buf[len];
4961
4962  jio_snprintf(p, buflen -len,
4963                 "\n\n"
4964                 "Do you want to debug the problem?\n\n"
4965                 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4966                 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4967                 "Otherwise, press RETURN to abort...",
4968                 os::current_process_id(),
4969                 os::current_thread_id(), thread_self());
4970
4971  bool yes = os::message_box("Unexpected Error", buf);
4972
4973  if (yes) {
4974    // yes, user asked VM to launch debugger
4975    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4976
4977    os::fork_and_exec(buf);
4978    yes = false;
4979  }
4980  return yes;
4981}
4982