os_aix.cpp revision 13249:a2753984d2c1
1/*
2 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26// According to the AIX OS doc #pragma alloca must be used
27// with C++ compiler before referencing the function alloca()
28#pragma alloca
29
30// no precompiled headers
31#include "classfile/classLoader.hpp"
32#include "classfile/systemDictionary.hpp"
33#include "classfile/vmSymbols.hpp"
34#include "code/icBuffer.hpp"
35#include "code/vtableStubs.hpp"
36#include "compiler/compileBroker.hpp"
37#include "interpreter/interpreter.hpp"
38#include "jvm_aix.h"
39#include "logging/log.hpp"
40#include "libo4.hpp"
41#include "libperfstat_aix.hpp"
42#include "libodm_aix.hpp"
43#include "loadlib_aix.hpp"
44#include "memory/allocation.inline.hpp"
45#include "memory/filemap.hpp"
46#include "misc_aix.hpp"
47#include "oops/oop.inline.hpp"
48#include "os_aix.inline.hpp"
49#include "os_share_aix.hpp"
50#include "porting_aix.hpp"
51#include "prims/jniFastGetField.hpp"
52#include "prims/jvm.h"
53#include "prims/jvm_misc.hpp"
54#include "runtime/arguments.hpp"
55#include "runtime/atomic.hpp"
56#include "runtime/extendedPC.hpp"
57#include "runtime/globals.hpp"
58#include "runtime/interfaceSupport.hpp"
59#include "runtime/java.hpp"
60#include "runtime/javaCalls.hpp"
61#include "runtime/mutexLocker.hpp"
62#include "runtime/objectMonitor.hpp"
63#include "runtime/orderAccess.inline.hpp"
64#include "runtime/os.hpp"
65#include "runtime/osThread.hpp"
66#include "runtime/perfMemory.hpp"
67#include "runtime/sharedRuntime.hpp"
68#include "runtime/statSampler.hpp"
69#include "runtime/stubRoutines.hpp"
70#include "runtime/thread.inline.hpp"
71#include "runtime/threadCritical.hpp"
72#include "runtime/timer.hpp"
73#include "runtime/vm_version.hpp"
74#include "services/attachListener.hpp"
75#include "services/runtimeService.hpp"
76#include "utilities/align.hpp"
77#include "utilities/decoder.hpp"
78#include "utilities/defaultStream.hpp"
79#include "utilities/events.hpp"
80#include "utilities/growableArray.hpp"
81#include "utilities/vmError.hpp"
82
83// put OS-includes here (sorted alphabetically)
84#include <errno.h>
85#include <fcntl.h>
86#include <inttypes.h>
87#include <poll.h>
88#include <procinfo.h>
89#include <pthread.h>
90#include <pwd.h>
91#include <semaphore.h>
92#include <signal.h>
93#include <stdint.h>
94#include <stdio.h>
95#include <string.h>
96#include <unistd.h>
97#include <sys/ioctl.h>
98#include <sys/ipc.h>
99#include <sys/mman.h>
100#include <sys/resource.h>
101#include <sys/select.h>
102#include <sys/shm.h>
103#include <sys/socket.h>
104#include <sys/stat.h>
105#include <sys/sysinfo.h>
106#include <sys/systemcfg.h>
107#include <sys/time.h>
108#include <sys/times.h>
109#include <sys/types.h>
110#include <sys/utsname.h>
111#include <sys/vminfo.h>
112#include <sys/wait.h>
113
114// Missing prototypes for various system APIs.
115extern "C"
116int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
117
118#if !defined(_AIXVERSION_610)
119extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
120extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
121extern "C" int getargs   (procsinfo*, int, char*, int);
122#endif
123
124#define MAX_PATH (2 * K)
125
126// for timer info max values which include all bits
127#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
128// for multipage initialization error analysis (in 'g_multipage_error')
129#define ERROR_MP_OS_TOO_OLD                          100
130#define ERROR_MP_EXTSHM_ACTIVE                       101
131#define ERROR_MP_VMGETINFO_FAILED                    102
132#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
133
134static address resolve_function_descriptor_to_code_pointer(address p);
135
136static void vmembk_print_on(outputStream* os);
137
138////////////////////////////////////////////////////////////////////////////////
139// global variables (for a description see os_aix.hpp)
140
// Total physical memory in bytes; filled in by initialize_system_info()
// from perfstat/libo4 data (see os::Aix::get_meminfo()).
julong    os::Aix::_physical_memory = 0;

// pthread id of the thread which initialized the VM; 0 until recorded.
pthread_t os::Aix::_main_thread = ((pthread_t)0);
// Base page size in bytes; -1 = uninitialized.
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// Whether the libc runs in XPG/SUS mode. -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// Whether EXTSHM (extended shared memory) is active. -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;
162
163////////////////////////////////////////////////////////////////////////////////
164// local variables
165
166static jlong    initial_time_count = 0;
167static int      clock_tics_per_sec = 100;
168static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
169static bool     check_signals      = true;
170static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
171static sigset_t SR_sigset;
172
173// Process break recorded at startup.
174static address g_brk_at_startup = NULL;
175
176// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
178// therefore should not be defined in AIX class.
179//
180// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
181// latter two (16M "large" resp. 16G "huge" pages) require special
182// setup and are normally not available.
183//
184// AIX supports multiple page sizes per process, for:
185//  - Stack (of the primordial thread, so not relevant for us)
186//  - Data - data, bss, heap, for us also pthread stacks
187//  - Text - text code
188//  - shared memory
189//
190// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
191// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
192//
193// For shared memory, page size can be set dynamically via
194// shmctl(). Different shared memory regions can have different page
195// sizes.
196//
// More information can be found at the IBM info center:
198//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
199//
// Snapshot of the multipage capabilities of this process/OS, filled in once
// by query_multipage_support(). All size fields start out as (size_t)-1
// ("uninitialized"); see the sanity asserts at the end of the query function
// for the values expected after initialization.
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
218
219// We must not accidentally allocate memory close to the BRK - even if
220// that would work - because then we prevent the BRK segment from
221// growing which may result in a malloc OOM even though there is
222// enough memory. The problem only arises if we shmat() or mmap() at
223// a specific wish address, e.g. to place the heap in a
224// compressed-oops-friendly way.
225static bool is_close_to_brk(address a) {
226  assert0(g_brk_at_startup != NULL);
227  if (a >= g_brk_at_startup &&
228      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
229    return true;
230  }
231  return false;
232}
233
// Platform-independent entry point; delegates to the AIX-specific query.
julong os::available_memory() {
  return Aix::available_memory();
}
237
238julong os::Aix::available_memory() {
239  // Avoid expensive API call here, as returned value will always be null.
240  if (os::Aix::on_pase()) {
241    return 0x0LL;
242  }
243  os::Aix::meminfo_t mi;
244  if (os::Aix::get_meminfo(&mi)) {
245    return mi.real_free;
246  } else {
247    return ULONG_MAX;
248  }
249}
250
// Platform-independent entry point; returns the total physical memory
// recorded during initialization.
julong os::physical_memory() {
  return Aix::physical_memory();
}
254
// Return true if the process runs with elevated privileges, i.e. the
// effective user/group id differs from the real one (set-uid/set-gid).
256
257bool os::have_special_privileges() {
258  static bool init = false;
259  static bool privileges = false;
260  if (!init) {
261    privileges = (getuid() != geteuid()) || (getgid() != getegid());
262    init = true;
263  }
264  return privileges;
265}
266
267// Helper function, emulates disclaim64 using multiple 32bit disclaims
268// because we cannot use disclaim64() on AS/400 and old AIX releases.
269static bool my_disclaim64(char* addr, size_t size) {
270
271  if (size == 0) {
272    return true;
273  }
274
275  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
276  const unsigned int maxDisclaimSize = 0x40000000;
277
278  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
279  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
280
281  char* p = addr;
282
283  for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
284    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
285      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
286      return false;
287    }
288    p += maxDisclaimSize;
289  }
290
291  if (lastDisclaimSize > 0) {
292    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
293      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
294      return false;
295    }
296  }
297
298  return true;
299}
300
301// Cpu architecture string
// Cpu architecture string, selected at compile time. AIX is PowerPC-only,
// so anything else is a build configuration error.
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
309
310// Wrap the function "vmgetinfo" which is not available on older OS releases.
311static int checked_vmgetinfo(void *out, int command, int arg) {
312  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
313    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
314  }
315  return ::vmgetinfo(out, command, arg);
316}
317
318// Given an address, returns the size of the page backing that address.
319size_t os::Aix::query_pagesize(void* addr) {
320
321  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
322    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
323    return 4*K;
324  }
325
326  vm_page_info pi;
327  pi.addr = (uint64_t)addr;
328  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
329    return pi.pagesize;
330  } else {
331    assert(false, "vmgetinfo failed to retrieve page size");
332    return 4*K;
333  }
334}
335
// Initializes processor count and total physical memory. Note that in
// product builds the assert below vanishes; get_meminfo() zeroes its output
// struct first, so on failure _physical_memory ends up 0.
void os::Aix::initialize_system_info() {

  // Get the number of online(logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}
349
350// Helper function for tracing page sizes.
351static const char* describe_pagesize(size_t pagesize) {
352  switch (pagesize) {
353    case 4*K : return "4K";
354    case 64*K: return "64K";
355    case 16*M: return "16M";
356    case 16*G: return "16G";
357    default:
358      assert(false, "surprise");
359      return "??";
360  }
361}
362
363// Probe OS for multipage support.
364// Will fill the global g_multipage_support structure.
365// Must be called before calling os::large_page_init().
366static void query_multipage_support() {
367
368  guarantee(g_multipage_support.pagesize == -1,
369            "do not call twice");
370
371  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
372
373  // This really would surprise me.
374  assert(g_multipage_support.pagesize == 4*K, "surprise!");
375
376  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
377  // Default data page size is defined either by linker options (-bdatapsize)
378  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
379  // default should be 4K.
380  {
381    void* p = ::malloc(16*M);
382    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
383    ::free(p);
384  }
385
386  // Query default shm page size (LDR_CNTRL SHMPSIZE).
387  // Note that this is pure curiosity. We do not rely on default page size but set
388  // our own page size after allocated.
389  {
390    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
391    guarantee(shmid != -1, "shmget failed");
392    void* p = ::shmat(shmid, NULL, 0);
393    ::shmctl(shmid, IPC_RMID, NULL);
394    guarantee(p != (void*) -1, "shmat failed");
395    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
396    ::shmdt(p);
397  }
398
399  // Before querying the stack page size, make sure we are not running as primordial
400  // thread (because primordial thread's stack may have different page size than
401  // pthread thread stacks). Running a VM on the primordial thread won't work for a
402  // number of reasons so we may just as well guarantee it here.
403  guarantee0(!os::Aix::is_primordial_thread());
404
405  // Query pthread stack page size. Should be the same as data page size because
406  // pthread stacks are allocated from C-Heap.
407  {
408    int dummy = 0;
409    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
410  }
411
412  // Query default text page size (LDR_CNTRL TEXTPSIZE).
413  {
414    address any_function =
415      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
416    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
417  }
418
419  // Now probe for support of 64K pages and 16M pages.
420
421  // Before OS/400 V6R1, there is no support for pages other than 4K.
422  if (os::Aix::on_pase_V5R4_or_older()) {
423    trcVerbose("OS/400 < V6R1 - no large page support.");
424    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
425    goto query_multipage_support_end;
426  }
427
428  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
429  {
430    const int MAX_PAGE_SIZES = 4;
431    psize_t sizes[MAX_PAGE_SIZES];
432    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
433    if (num_psizes == -1) {
434      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
435      trcVerbose("disabling multipage support.");
436      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
437      goto query_multipage_support_end;
438    }
439    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
440    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
441    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
442    for (int i = 0; i < num_psizes; i ++) {
443      trcVerbose(" %s ", describe_pagesize(sizes[i]));
444    }
445
446    // Can we use 64K, 16M pages?
447    for (int i = 0; i < num_psizes; i ++) {
448      const size_t pagesize = sizes[i];
449      if (pagesize != 64*K && pagesize != 16*M) {
450        continue;
451      }
452      bool can_use = false;
453      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
454      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
455        IPC_CREAT | S_IRUSR | S_IWUSR);
456      guarantee0(shmid != -1); // Should always work.
457      // Try to set pagesize.
458      struct shmid_ds shm_buf = { 0 };
459      shm_buf.shm_pagesize = pagesize;
460      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
461        const int en = errno;
462        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
463        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
464          errno);
465      } else {
466        // Attach and double check pageisze.
467        void* p = ::shmat(shmid, NULL, 0);
468        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
469        guarantee0(p != (void*) -1); // Should always work.
470        const size_t real_pagesize = os::Aix::query_pagesize(p);
471        if (real_pagesize != pagesize) {
472          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
473        } else {
474          can_use = true;
475        }
476        ::shmdt(p);
477      }
478      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
479      if (pagesize == 64*K) {
480        g_multipage_support.can_use_64K_pages = can_use;
481      } else if (pagesize == 16*M) {
482        g_multipage_support.can_use_16M_pages = can_use;
483      }
484    }
485
486  } // end: check which pages can be used for shared memory
487
488query_multipage_support_end:
489
490  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
491      describe_pagesize(g_multipage_support.pagesize));
492  trcVerbose("Data page size (C-Heap, bss, etc): %s",
493      describe_pagesize(g_multipage_support.datapsize));
494  trcVerbose("Text page size: %s",
495      describe_pagesize(g_multipage_support.textpsize));
496  trcVerbose("Thread stack page size (pthread): %s",
497      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
498  trcVerbose("Default shared memory page size: %s",
499      describe_pagesize(g_multipage_support.shmpsize));
500  trcVerbose("Can use 64K pages dynamically with shared meory: %s",
501      (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
502  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
503      (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
504  trcVerbose("Multipage error details: %d",
505      g_multipage_support.error);
506
507  // sanity checks
508  assert0(g_multipage_support.pagesize == 4*K);
509  assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
510  assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
511  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
512  assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
513
514}
515
// Computes and publishes the system properties derived from the location of
// libjvm.so: dll_dir, java_home, boot path, library path and extension dirs.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Only strip the /lib component if the previous strrchr succeeded,
    // i.e. buf still contained at least one '/' above.
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories. Reuses buf; bufsize was sized above to hold
  // java_home plus EXTENSIONS_DIR.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
581
582////////////////////////////////////////////////////////////////////////////////
583// breakpoint support
584
// Trigger a hard breakpoint via the platform BREAKPOINT macro.
void os::breakpoint() {
  BREAKPOINT;
}
588
// Deliberately empty C-linkage function: attach a debugger and set a
// breakpoint on this well-known symbol name.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
592
593////////////////////////////////////////////////////////////////////////////////
594// signal support
595
// Guards against use of the signal sets below before signal_sets_init() ran
// (debug builds only).
debug_only(static bool signal_sets_initialized = false);
// Sets built by signal_sets_init(); see unblocked_signals()/vm_signals().
static sigset_t unblocked_sigs, vm_sigs;
598
599bool os::Aix::is_sig_ignored(int sig) {
600  struct sigaction oact;
601  sigaction(sig, (struct sigaction*)NULL, &oact);
602  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
603    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
604  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
605    return true;
606  } else {
607    return false;
608  }
609}
610
// Builds the two global signal sets (unblocked_sigs, vm_sigs) used to set up
// per-thread signal masks in hotspot_sigmask(). Must run once, before any
// thread consults the sets.
void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SR_signum);

  // Shutdown signals are only unblocked if nobody (e.g. the embedder) has
  // installed SIG_IGN for them.
  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}
652
653// These are signals that are unblocked while a thread is running Java.
654// (For some reason, they get blocked by default.)
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Valid only after signal_sets_init() has run.
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
659
660// These are the signals that are blocked while a (non-VM) thread is
661// running Java. Only the VM thread handles these signals.
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Valid only after signal_sets_init() has run.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
666
667void os::Aix::hotspot_sigmask(Thread* thread) {
668
669  //Save caller's signal mask before setting VM signal mask
670  sigset_t caller_sigmask;
671  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
672
673  OSThread* osthread = thread->osthread();
674  osthread->set_caller_sigmask(caller_sigmask);
675
676  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
677
678  if (!ReduceSignalUsage) {
679    if (thread->is_VM_thread()) {
680      // Only the VM thread handles BREAK_SIGNAL ...
681      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
682    } else {
683      // ... all other threads block BREAK_SIGNAL
684      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
685    }
686  }
687}
688
689// retrieve memory information.
690// Returns false if something went wrong;
691// content of pmi undefined in this case.
// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
// On PASE the numbers come from the libo4 porting library, on AIX from
// libperfstat. All byte values are derived from 4K-page counts.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  // Zero the output first so failed queries leave deterministic content.
  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    // We requested exactly one record; anything else is unexpected.
    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    // Convert 4K-page counts to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
756
757//////////////////////////////////////////////////////////////////////////////
758// create new thread
759
760// Thread start routine for all newly created threads
// Thread start routine for all newly created threads.
// Runs on the new thread: records stack geometry and thread ids, sets up
// TLS, the signal mask and FPU state, then calls thread->run(). The order
// of these steps matters; do not reorder without care.
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    thread->set_stack_base(os::current_stack_base());
    thread->set_stack_size(os::current_stack_size());
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  // Establish Thread::current() for this thread (TLS) before anything that
  // may need it.
  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}
830
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {

  // Creates the native (pthread) representation for 'thread'. The new thread
  // is created in suspended state and later woken up in os::pd_start_thread().
  // Returns false if either the OSThread or the pthread could not be created;
  // in that case the Thread is left without an osthread.
  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object.
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread state.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // Init thread attributes.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  }

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
  int status = pthread_attr_setstacksize(&attr, stack_size);
  assert_status(status == 0, status, "pthread_attr_setstacksize");

  // Configure libc guard page.
  pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

  char buf[64];
  if (ret == 0) {
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  } else {
    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  }

  // The attribute object is no longer needed once the pthread exists (or failed).
  pthread_attr_destroy(&attr);

  if (ret != 0) {
    // Need to clean up stuff we've allocated so far.
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}
898
899/////////////////////////////////////////////////////////////////////////////
900// attach existing thread
901
// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  // The primordial thread is attached like any other pre-existing native
  // thread; this merely asserts we really are on the primordial thread.
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
907
// Wraps an already existing native thread (the primordial thread, or a thread
// entering the VM via JNI AttachCurrentThread) in an OSThread and initializes
// it for VM use. Returns false if the OSThread could not be allocated.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return true;
}
953
954void os::pd_start_thread(Thread* thread) {
955  int status = pthread_continue_np(thread->osthread()->pthread_id());
956  assert(status == 0, "thr_continue failed");
957}
958
959// Free OS resources related to the OSThread
960void os::free_thread(OSThread* osthread) {
961  assert(osthread != NULL, "osthread not set");
962
963  // We are told to free resources of the argument thread,
964  // but we can only really operate on the current thread.
965  assert(Thread::current()->osthread() == osthread,
966         "os::free_thread but not current thread");
967
968  // Restore caller's signal mask
969  sigset_t sigmask = osthread->caller_sigmask();
970  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
971
972  delete osthread;
973}
974
975////////////////////////////////////////////////////////////////////////////////
976// time support
977
978// Time since start-up in seconds to a fine granularity.
979// Used by VMSelfDestructTimer and the MemProfiler.
980double os::elapsedTime() {
981  return (double)(os::elapsed_counter()) * 0.000001;
982}
983
984jlong os::elapsed_counter() {
985  timeval time;
986  int status = gettimeofday(&time, NULL);
987  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
988}
989
// The elapsed counter (see os::elapsed_counter) ticks in microseconds.
jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

// Thread CPU time ("vtime") is available via getrusage(RUSAGE_THREAD),
// see os::elapsedVTime(); no explicit enabling step exists.
bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
997
998double os::elapsedVTime() {
999  struct rusage usage;
1000  int retval = getrusage(RUSAGE_THREAD, &usage);
1001  if (retval == 0) {
1002    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1003  } else {
1004    // better than nothing, but not much
1005    return elapsedTime();
1006  }
1007}
1008
1009jlong os::javaTimeMillis() {
1010  timeval time;
1011  int status = gettimeofday(&time, NULL);
1012  assert(status != -1, "aix error at gettimeofday()");
1013  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1014}
1015
1016void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1017  timeval time;
1018  int status = gettimeofday(&time, NULL);
1019  assert(status != -1, "aix error at gettimeofday()");
1020  seconds = jlong(time.tv_sec);
1021  nanos = jlong(time.tv_usec) * 1000;
1022}
1023
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    // PASE: no time base registers are accessible here; fall back to
    // gettimeofday and scale microseconds up to nanoseconds (so the
    // effective resolution is only microseconds).
    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1052
// Describe the characteristics of the os::javaTimeNanos() timer for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}
1060
1061// Return the real, user, and system times in seconds from an
1062// arbitrary fixed point in the past.
1063bool os::getTimesSecs(double* process_real_time,
1064                      double* process_user_time,
1065                      double* process_system_time) {
1066  struct tms ticks;
1067  clock_t real_ticks = times(&ticks);
1068
1069  if (real_ticks == (clock_t) (-1)) {
1070    return false;
1071  } else {
1072    double ticks_per_second = (double) clock_tics_per_sec;
1073    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1074    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1075    *process_real_time = ((double) real_ticks) / ticks_per_second;
1076
1077    return true;
1078  }
1079}
1080
1081char * os::local_time_string(char *buf, size_t buflen) {
1082  struct tm t;
1083  time_t long_time;
1084  time(&long_time);
1085  localtime_r(&long_time, &t);
1086  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1087               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1088               t.tm_hour, t.tm_min, t.tm_sec);
1089  return buf;
1090}
1091
// Thread-safe localtime: the result is stored in the caller-provided
// 'res' buffer rather than in static storage.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
1095
1096////////////////////////////////////////////////////////////////////////////////
1097// runtime exit support
1098
1099// Note: os::shutdown() might be called very early during initialization, or
1100// called from signal handler. Before adding something to os::shutdown(), make
1101// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {
  // Best-effort cleanup; the step order below is deliberate and each step
  // must remain async-safe (see the note above this function).

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1119
1120// Note: os::abort() might be called very early during initialization, or
1121// called from signal handler. Before adding something to os::abort(), make
1122// sure it is async-safe and can handle partially initialized VM.
// Terminate the VM after best-effort shutdown. If dump_core is set,
// terminate via ::abort() (SIGABRT) so the OS can write a core file;
// otherwise exit with status 1. siginfo/context are unused here.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}
1139
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // Raises SIGABRT; unlike os::abort(), no shutdown work is attempted.
  ::abort();
}
1144
1145// This method is a copy of JDK's sysGetLastErrorString
1146// from src/solaris/hpi/src/system_md.c
1147
1148size_t os::lasterror(char *buf, size_t len) {
1149  if (errno == 0) return 0;
1150
1151  const char *s = os::strerror(errno);
1152  size_t n = ::strlen(s);
1153  if (n >= len) {
1154    n = len - 1;
1155  }
1156  ::strncpy(buf, s, n);
1157  buf[n] = '\0';
1158  return n;
1159}
1160
// On AIX the pthread id doubles as the VM-level thread id.
intx os::current_thread_id() {
  return (intx)pthread_self();
}

int os::current_process_id() {
  return getpid();
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1176
1177static bool file_exists(const char* filename) {
1178  struct stat statbuf;
1179  if (filename == NULL || strlen(filename) == 0) {
1180    return false;
1181  }
1182  return os::stat(filename, &statbuf) == 0;
1183}
1184
// Build the platform library name "lib<fname>.so" (optionally prefixed by
// "<pname>/") into 'buffer'. When 'pname' is a path list (contains the path
// separator), each entry is probed in order and the first existing file wins.
// Returns true if a usable name was written; false on buffer overflow, on
// split_path failure, or when no candidate in a path list exists.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  // (the 10 covers "lib", ".so", a '/', and the terminator)
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1232
1233// Check if addr is inside libjvm.so.
1234bool os::address_is_in_vm(address addr) {
1235
1236  // Input could be a real pc or a function pointer literal. The latter
1237  // would be a function descriptor residing in the data segment of a module.
1238  loaded_module_t lm;
1239  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1240    return lm.is_in_vm;
1241  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1242    return lm.is_in_vm;
1243  } else {
1244    return false;
1245  }
1246
1247}
1248
1249// Resolve an AIX function descriptor literal to a code pointer.
1250// If the input is a valid code pointer to a text segment of a loaded module,
1251//   it is returned unchanged.
1252// If the input is a valid AIX function descriptor, it is resolved to the
1253//   code entry point.
1254// If the input is neither a valid function descriptor nor a valid code pointer,
1255//   NULL is returned.
1256static address resolve_function_descriptor_to_code_pointer(address p) {
1257
1258  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1259    // It is a real code pointer.
1260    return p;
1261  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1262    // Pointer to data segment, potential function descriptor.
1263    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1264    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1265      // It is a function descriptor.
1266      return code_entry;
1267    }
1268  }
1269
1270  return NULL;
1271}
1272
1273bool os::dll_address_to_function_name(address addr, char *buf,
1274                                      int buflen, int *offset,
1275                                      bool demangle) {
1276  if (offset) {
1277    *offset = -1;
1278  }
1279  // Buf is not optional, but offset is optional.
1280  assert(buf != NULL, "sanity check");
1281  buf[0] = '\0';
1282
1283  // Resolve function ptr literals first.
1284  addr = resolve_function_descriptor_to_code_pointer(addr);
1285  if (!addr) {
1286    return false;
1287  }
1288
1289  return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1290}
1291
1292bool os::dll_address_to_library_name(address addr, char* buf,
1293                                     int buflen, int* offset) {
1294  if (offset) {
1295    *offset = -1;
1296  }
1297  // Buf is not optional, but offset is optional.
1298  assert(buf != NULL, "sanity check");
1299  buf[0] = '\0';
1300
1301  // Resolve function ptr literals first.
1302  addr = resolve_function_descriptor_to_code_pointer(addr);
1303  if (!addr) {
1304    return false;
1305  }
1306
1307  return AixSymbols::get_module_name(addr, buf, buflen);
1308}
1309
1310// Loads .dll/.so and in case of error it checks if .dll/.so was built
1311// for the same architecture as Hotspot is running on.
1312void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1313
1314  if (ebuf && ebuflen > 0) {
1315    ebuf[0] = '\0';
1316    ebuf[ebuflen - 1] = '\0';
1317  }
1318
1319  if (!filename || strlen(filename) == 0) {
1320    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1321    return NULL;
1322  }
1323
1324  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1325  void * result= ::dlopen(filename, RTLD_LAZY);
1326  if (result != NULL) {
1327    // Reload dll cache. Don't do this in signal handling.
1328    LoadedLibraries::reload();
1329    return result;
1330  } else {
1331    // error analysis when dlopen fails
1332    const char* const error_report = ::dlerror();
1333    if (error_report && ebuf && ebuflen > 0) {
1334      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1335               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1336    }
1337  }
1338  return NULL;
1339}
1340
1341void* os::dll_lookup(void* handle, const char* name) {
1342  void* res = dlsym(handle, name);
1343  return res;
1344}
1345
1346void* os::get_default_process_handle() {
1347  return (void*)::dlopen(NULL, RTLD_LAZY);
1348}
1349
1350void os::print_dll_info(outputStream *st) {
1351  st->print_cr("Dynamic libraries:");
1352  LoadedLibraries::print(st);
1353}
1354
1355void os::get_summary_os_info(char* buf, size_t buflen) {
1356  // There might be something more readable than uname results for AIX.
1357  struct utsname name;
1358  uname(&name);
1359  snprintf(buf, buflen, "%s %s", name.release, name.version);
1360}
1361
1362void os::print_os_info(outputStream* st) {
1363  st->print("OS:");
1364
1365  st->print("uname:");
1366  struct utsname name;
1367  uname(&name);
1368  st->print(name.sysname); st->print(" ");
1369  st->print(name.nodename); st->print(" ");
1370  st->print(name.release); st->print(" ");
1371  st->print(name.version); st->print(" ");
1372  st->print(name.machine);
1373  st->cr();
1374
1375  uint32_t ver = os::Aix::os_version();
1376  st->print_cr("AIX kernel version %u.%u.%u.%u",
1377               (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1378
1379  os::Posix::print_rlimit_info(st);
1380
1381  // load average
1382  st->print("load average:");
1383  double loadavg[3] = {-1.L, -1.L, -1.L};
1384  os::loadavg(loadavg, 3);
1385  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1386  st->cr();
1387
1388  // print wpar info
1389  libperfstat::wparinfo_t wi;
1390  if (libperfstat::get_wparinfo(&wi)) {
1391    st->print_cr("wpar info");
1392    st->print_cr("name: %s", wi.name);
1393    st->print_cr("id:   %d", wi.wpar_id);
1394    st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1395  }
1396
1397  // print partition info
1398  libperfstat::partitioninfo_t pi;
1399  if (libperfstat::get_partitioninfo(&pi)) {
1400    st->print_cr("partition info");
1401    st->print_cr(" name: %s", pi.name);
1402  }
1403
1404}
1405
1406void os::print_memory_info(outputStream* st) {
1407
1408  st->print_cr("Memory:");
1409
1410  st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1411    describe_pagesize(g_multipage_support.pagesize));
1412  st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1413    describe_pagesize(g_multipage_support.datapsize));
1414  st->print_cr("  Text page size:                         %s",
1415    describe_pagesize(g_multipage_support.textpsize));
1416  st->print_cr("  Thread stack page size (pthread):       %s",
1417    describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1418  st->print_cr("  Default shared memory page size:        %s",
1419    describe_pagesize(g_multipage_support.shmpsize));
1420  st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1421    (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1422  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1423    (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1424  st->print_cr("  Multipage error: %d",
1425    g_multipage_support.error);
1426  st->cr();
1427  st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1428
1429  // print out LDR_CNTRL because it affects the default page sizes
1430  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1431  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1432
1433  // Print out EXTSHM because it is an unsupported setting.
1434  const char* const extshm = ::getenv("EXTSHM");
1435  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1436  if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1437    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1438  }
1439
1440  // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1441  const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1442  st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1443      aixthread_guardpages ? aixthread_guardpages : "<unset>");
1444
1445  os::Aix::meminfo_t mi;
1446  if (os::Aix::get_meminfo(&mi)) {
1447    char buffer[256];
1448    if (os::Aix::on_aix()) {
1449      st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1450      st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1451      st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1452      st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1453    } else {
1454      // PASE - Numbers are result of QWCRSSTS; they mean:
1455      // real_total: Sum of all system pools
1456      // real_free: always 0
1457      // pgsp_total: we take the size of the system ASP
1458      // pgsp_free: size of system ASP times percentage of system ASP unused
1459      st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1460      st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1461      st->print_cr("%% system asp used : " SIZE_FORMAT,
1462        mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1463    }
1464    st->print_raw(buffer);
1465  }
1466  st->cr();
1467
1468  // Print segments allocated with os::reserve_memory.
1469  st->print_cr("internal virtual memory regions used by vm:");
1470  vmembk_print_on(st);
1471}
1472
1473// Get a string for the cpuinfo that is a summary of the cpu type
1474void os::get_summary_cpu_info(char* buf, size_t buflen) {
1475  // This looks good
1476  libperfstat::cpuinfo_t ci;
1477  if (libperfstat::get_cpuinfo(&ci)) {
1478    strncpy(buf, ci.version, buflen);
1479  } else {
1480    strncpy(buf, "AIX", buflen);
1481  }
1482}
1483
// Print a one-line CPU summary to 'st'. May run during error reporting,
// which is why the active-processor query below stays disabled.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash.
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::features());
  st->cr();
}
1492
1493static void print_signal_handler(outputStream* st, int sig,
1494                                 char* buf, size_t buflen);
1495
1496void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1497  st->print_cr("Signal Handlers:");
1498  print_signal_handler(st, SIGSEGV, buf, buflen);
1499  print_signal_handler(st, SIGBUS , buf, buflen);
1500  print_signal_handler(st, SIGFPE , buf, buflen);
1501  print_signal_handler(st, SIGPIPE, buf, buflen);
1502  print_signal_handler(st, SIGXFSZ, buf, buflen);
1503  print_signal_handler(st, SIGILL , buf, buflen);
1504  print_signal_handler(st, SR_signum, buf, buflen);
1505  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1506  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1507  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1508  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1509  print_signal_handler(st, SIGTRAP, buf, buflen);
1510  // We also want to know if someone else adds a SIGDANGER handler because
1511  // that will interfere with OOM killling.
1512  print_signal_handler(st, SIGDANGER, buf, buflen);
1513}
1514
1515static char saved_jvm_path[MAXPATHLEN] = {0};
1516
// Find the full path to the current module, libjvm.so.
// The result is written to 'buf' (which must be at least MAXPATHLEN bytes)
// and cached in saved_jvm_path for subsequent calls.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate libjvm.so via the address of this very function.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk p back over the last four '/' separators, i.e. to the start of a
    // potential "/jre/lib/<vmtype>/libjvm.so" tail.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  // Cache the result for subsequent calls.
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1598
// JNI name decoration: AIX uses plain, undecorated C symbol names, so
// neither a prefix nor an argument-size suffix is emitted.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1606
1607////////////////////////////////////////////////////////////////////////////////
1608// sun.misc.Signal support
1609
1610static volatile jint sigint_count = 0;
1611
// Handler installed (via os::signal) for signals forwarded to sun.misc.Signal.
// Runs in signal context, so it only does async-safe work: it records the
// signal via os::signal_notify() and lets the dispatcher thread (waiting in
// check_pending_signals) run the Java-level callback.
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && VMError::is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}
1628
// Returns the entry point installed for signals routed to sun.misc.Signal.
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
1632
1633extern "C" {
1634  typedef void (*sa_handler_t)(int);
1635  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1636}
1637
// Install 'handler' for 'signal_number' via sigaction and return the
// previously installed handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1669
// Send 'signal_number' to the current process.
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}

//
// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.
//

// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };
1686
1687// Wrapper functions for: sem_init(), sem_post(), sem_wait()
1688// On AIX, we use sem_init(), sem_post(), sem_wait()
1689// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
1690// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
1691// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
1692// on AIX, msem_..() calls are suspected of causing problems.
1693static sem_t sig_sem;
1694static msemaphore* p_sig_msem = 0;
1695
1696static void local_sem_init() {
1697  if (os::Aix::on_aix()) {
1698    int rc = ::sem_init(&sig_sem, 0, 0);
1699    guarantee(rc != -1, "sem_init failed");
1700  } else {
1701    // Memory semaphores must live in shared mem.
1702    guarantee0(p_sig_msem == NULL);
1703    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1704    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1705    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1706  }
1707}
1708
1709static void local_sem_post() {
1710  static bool warn_only_once = false;
1711  if (os::Aix::on_aix()) {
1712    int rc = ::sem_post(&sig_sem);
1713    if (rc == -1 && !warn_only_once) {
1714      trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
1715      warn_only_once = true;
1716    }
1717  } else {
1718    guarantee0(p_sig_msem != NULL);
1719    int rc = ::msem_unlock(p_sig_msem, 0);
1720    if (rc == -1 && !warn_only_once) {
1721      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
1722      warn_only_once = true;
1723    }
1724  }
1725}
1726
1727static void local_sem_wait() {
1728  static bool warn_only_once = false;
1729  if (os::Aix::on_aix()) {
1730    int rc = ::sem_wait(&sig_sem);
1731    if (rc == -1 && !warn_only_once) {
1732      trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
1733      warn_only_once = true;
1734    }
1735  } else {
1736    guarantee0(p_sig_msem != NULL); // must init before use
1737    int rc = ::msem_lock(p_sig_msem, 0);
1738    if (rc == -1 && !warn_only_once) {
1739      trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
1740      warn_only_once = true;
1741    }
1742  }
1743}
1744
// Platform-specific part of signal subsystem initialization: zero the
// pending-signal counters and create the signal semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  // (pending_signals is statically zero-initialized; this memset makes the
  // reset explicit and harmless.)
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}
1752
// Record one occurrence of <sig> and wake a thread blocked in
// check_pending_signals() / os::signal_wait().
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1757
// Scan pending_signals for a raised signal; if one is found, atomically
// consume one occurrence and return its number. With wait == false, return
// -1 immediately when nothing is pending; with wait == true, block on the
// signal semaphore until os::signal_notify() posts, cooperating with the
// external thread-suspension protocol while blocked.
static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    // Try to claim one pending occurrence of any signal via CAS on its counter.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        // Re-post so the wakeup is not lost, then suspend ourselves.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1797
// Non-blocking poll for a pending signal; returns the signal number or -1.
int os::signal_lookup() {
  return check_pending_signals(false);
}
1801
// Block until a signal is pending; returns the signal number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1805
1806////////////////////////////////////////////////////////////////////////////////
1807// Virtual Memory
1808
// We need to keep small simple bookkeeping for os::reserve_memory and friends.

// Values for vmembk_t::type - record how a range was obtained.
#define VMEM_MAPPED  1   // reserved via mmap()
#define VMEM_SHMATED 2   // reserved via shmget()/shmat()
1813
// Bookkeeping record for one memory range reserved by this file's
// reserve_* functions; kept in the global singly-linked 'vmem' list.
struct vmembk_t {
  int type;         // VMEM_MAPPED (1 - mmap) or VMEM_SHMATED (2 - shmat)
  char* addr;       // Start address of the range.
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;   // Next record in the 'vmem' list.

  // True if p lies within [addr, addr + size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // True if the whole range [p, p + s) lies within this block.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  // Print "[start - end] (bytes, n pages), kind" to the given stream.
  // NOTE(review): raw pointers are passed for PTR_FORMAT and a size_t
  // quotient for %d here - confirm these format/argument pairings on AIX64.
  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  // Aborts (guarantee0) on violation.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1853
// Global list of reserved-memory bookkeeping records, guarded by its own
// critical section (all vmembk_* helpers below take it).
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;
1858
1859static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1860  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1861  assert0(p);
1862  if (p) {
1863    MiscUtils::AutoCritSect lck(&vmem.cs);
1864    p->addr = addr; p->size = size;
1865    p->pagesize = pagesize;
1866    p->type = type;
1867    p->next = vmem.first;
1868    vmem.first = p;
1869  }
1870}
1871
1872static vmembk_t* vmembk_find(char* addr) {
1873  MiscUtils::AutoCritSect lck(&vmem.cs);
1874  for (vmembk_t* p = vmem.first; p; p = p->next) {
1875    if (p->addr <= addr && (p->addr + p->size) > addr) {
1876      return p;
1877    }
1878  }
1879  return NULL;
1880}
1881
1882static void vmembk_remove(vmembk_t* p0) {
1883  MiscUtils::AutoCritSect lck(&vmem.cs);
1884  assert0(p0);
1885  assert0(vmem.first); // List should not be empty.
1886  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1887    if (*pp == p0) {
1888      *pp = p0->next;
1889      ::free(p0);
1890      return;
1891    }
1892  }
1893  assert0(false); // Not found?
1894}
1895
1896static void vmembk_print_on(outputStream* os) {
1897  MiscUtils::AutoCritSect lck(&vmem.cs);
1898  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1899    vmi->print_on(os);
1900    os->cr();
1901  }
1902}
1903
1904// Reserve and attach a section of System V memory.
1905// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1906// address. Failing that, it will attach the memory anywhere.
1907// If <requested_addr> is NULL, function will attach the memory anywhere.
1908//
1909// <alignment_hint> is being ignored by this function. It is very probable however that the
1910// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
// Should this not be enough, we can put more work into it.
1912static char* reserve_shmated_memory (
1913  size_t bytes,
1914  char* requested_addr,
1915  size_t alignment_hint) {
1916
1917  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1918    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1919    bytes, requested_addr, alignment_hint);
1920
1921  // Either give me wish address or wish alignment but not both.
1922  assert0(!(requested_addr != NULL && alignment_hint != 0));
1923
1924  // We must prevent anyone from attaching too close to the
1925  // BRK because that may cause malloc OOM.
1926  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1927    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1928      "Will attach anywhere.", requested_addr);
1929    // Act like the OS refused to attach there.
1930    requested_addr = NULL;
1931  }
1932
1933  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1934  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1935  if (os::Aix::on_pase_V5R4_or_older()) {
1936    ShouldNotReachHere();
1937  }
1938
1939  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1940  const size_t size = align_up(bytes, 64*K);
1941
1942  // Reserve the shared segment.
1943  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1944  if (shmid == -1) {
1945    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1946    return NULL;
1947  }
1948
1949  // Important note:
1950  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1951  // We must right after attaching it remove it from the system. System V shm segments are global and
1952  // survive the process.
1953  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1954
1955  struct shmid_ds shmbuf;
1956  memset(&shmbuf, 0, sizeof(shmbuf));
1957  shmbuf.shm_pagesize = 64*K;
1958  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1959    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1960               size / (64*K), errno);
1961    // I want to know if this ever happens.
1962    assert(false, "failed to set page size for shmat");
1963  }
1964
1965  // Now attach the shared segment.
1966  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1967  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1968  // were not a segment boundary.
1969  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1970  const int errno_shmat = errno;
1971
1972  // (A) Right after shmat and before handing shmat errors delete the shm segment.
1973  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1974    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1975    assert(false, "failed to remove shared memory segment!");
1976  }
1977
1978  // Handle shmat error. If we failed to attach, just return.
1979  if (addr == (char*)-1) {
1980    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1981    return NULL;
1982  }
1983
1984  // Just for info: query the real page size. In case setting the page size did not
1985  // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
1986  const size_t real_pagesize = os::Aix::query_pagesize(addr);
1987  if (real_pagesize != shmbuf.shm_pagesize) {
1988    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
1989  }
1990
1991  if (addr) {
1992    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1993      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1994  } else {
1995    if (requested_addr != NULL) {
1996      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
1997    } else {
1998      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
1999    }
2000  }
2001
2002  // book-keeping
2003  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2004  assert0(is_aligned_to(addr, os::vm_page_size()));
2005
2006  return addr;
2007}
2008
2009static bool release_shmated_memory(char* addr, size_t size) {
2010
2011  trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2012    addr, addr + size - 1);
2013
2014  bool rc = false;
2015
2016  // TODO: is there a way to verify shm size without doing bookkeeping?
2017  if (::shmdt(addr) != 0) {
2018    trcVerbose("error (%d).", errno);
2019  } else {
2020    trcVerbose("ok.");
2021    rc = true;
2022  }
2023  return rc;
2024}
2025
2026static bool uncommit_shmated_memory(char* addr, size_t size) {
2027  trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2028    addr, addr + size - 1);
2029
2030  const bool rc = my_disclaim64(addr, size);
2031
2032  if (!rc) {
2033    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2034    return false;
2035  }
2036  return true;
2037}
2038
2039////////////////////////////////  mmap-based routines /////////////////////////////////
2040
2041// Reserve memory via mmap.
2042// If <requested_addr> is given, an attempt is made to attach at the given address.
2043// Failing that, memory is allocated at any address.
2044// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2045// allocate at an address aligned with the given alignment. Failing that, memory
2046// is aligned anywhere.
2047static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2048  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2049    "alignment_hint " UINTX_FORMAT "...",
2050    bytes, requested_addr, alignment_hint);
2051
2052  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2053  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2054    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2055    return NULL;
2056  }
2057
2058  // We must prevent anyone from attaching too close to the
2059  // BRK because that may cause malloc OOM.
2060  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2061    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2062      "Will attach anywhere.", requested_addr);
2063    // Act like the OS refused to attach there.
2064    requested_addr = NULL;
2065  }
2066
2067  // Specify one or the other but not both.
2068  assert0(!(requested_addr != NULL && alignment_hint > 0));
2069
2070  // In 64K mode, we claim the global page size (os::vm_page_size())
2071  // is 64K. This is one of the few points where that illusion may
2072  // break, because mmap() will always return memory aligned to 4K. So
2073  // we must ensure we only ever return memory aligned to 64k.
2074  if (alignment_hint) {
2075    alignment_hint = lcm(alignment_hint, os::vm_page_size());
2076  } else {
2077    alignment_hint = os::vm_page_size();
2078  }
2079
2080  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2081  const size_t size = align_up(bytes, os::vm_page_size());
2082
2083  // alignment: Allocate memory large enough to include an aligned range of the right size and
2084  // cut off the leading and trailing waste pages.
2085  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2086  const size_t extra_size = size + alignment_hint;
2087
2088  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2089  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2090  int flags = MAP_ANONYMOUS | MAP_SHARED;
2091
2092  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2093  // it means if wishaddress is given but MAP_FIXED is not set.
2094  //
2095  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2096  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2097  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2098  // get clobbered.
2099  if (requested_addr != NULL) {
2100    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2101      flags |= MAP_FIXED;
2102    }
2103  }
2104
2105  char* addr = (char*)::mmap(requested_addr, extra_size,
2106      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2107
2108  if (addr == MAP_FAILED) {
2109    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2110    return NULL;
2111  }
2112
2113  // Handle alignment.
2114  char* const addr_aligned = align_up(addr, alignment_hint);
2115  const size_t waste_pre = addr_aligned - addr;
2116  char* const addr_aligned_end = addr_aligned + size;
2117  const size_t waste_post = extra_size - waste_pre - size;
2118  if (waste_pre > 0) {
2119    ::munmap(addr, waste_pre);
2120  }
2121  if (waste_post > 0) {
2122    ::munmap(addr_aligned_end, waste_post);
2123  }
2124  addr = addr_aligned;
2125
2126  if (addr) {
2127    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2128      addr, addr + bytes, bytes);
2129  } else {
2130    if (requested_addr != NULL) {
2131      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2132    } else {
2133      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2134    }
2135  }
2136
2137  // bookkeeping
2138  vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2139
2140  // Test alignment, see above.
2141  assert0(is_aligned_to(addr, os::vm_page_size()));
2142
2143  return addr;
2144}
2145
2146static bool release_mmaped_memory(char* addr, size_t size) {
2147  assert0(is_aligned_to(addr, os::vm_page_size()));
2148  assert0(is_aligned_to(size, os::vm_page_size()));
2149
2150  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2151    addr, addr + size - 1);
2152  bool rc = false;
2153
2154  if (::munmap(addr, size) != 0) {
2155    trcVerbose("failed (%d)\n", errno);
2156    rc = false;
2157  } else {
2158    trcVerbose("ok.");
2159    rc = true;
2160  }
2161
2162  return rc;
2163}
2164
2165static bool uncommit_mmaped_memory(char* addr, size_t size) {
2166
2167  assert0(is_aligned_to(addr, os::vm_page_size()));
2168  assert0(is_aligned_to(size, os::vm_page_size()));
2169
2170  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2171    addr, addr + size - 1);
2172  bool rc = false;
2173
2174  // Uncommit mmap memory with msync MS_INVALIDATE.
2175  if (::msync(addr, size, MS_INVALIDATE) != 0) {
2176    trcVerbose("failed (%d)\n", errno);
2177    rc = false;
2178  } else {
2179    trcVerbose("ok.");
2180    rc = true;
2181  }
2182
2183  return rc;
2184}
2185
// Global VM page size (4K or 64K on AIX); valid only after os::init.
int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2191
// Aix allocates memory by pages, so the allocation granularity equals the
// page size.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2197
#ifdef PRODUCT
// Emit a warning describing a failed commit. Compiled only into product
// builds, matching its PRODUCT_ONLY() call site in pd_commit_memory_or_exit.
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          os::errno_name(err), err);
}
#endif
2206
// Commit the given range, or exit the VM with an OOM error.
// <mesg> is included in the exit message and must not be NULL.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
2216
2217bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2218
2219  assert(is_aligned_to(addr, os::vm_page_size()),
2220    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2221    p2i(addr), os::vm_page_size());
2222  assert(is_aligned_to(size, os::vm_page_size()),
2223    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2224    size, os::vm_page_size());
2225
2226  vmembk_t* const vmi = vmembk_find(addr);
2227  guarantee0(vmi);
2228  vmi->assert_is_valid_subrange(addr, size);
2229
2230  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2231
2232  if (UseExplicitCommit) {
2233    // AIX commits memory on touch. So, touch all pages to be committed.
2234    for (char* p = addr; p < (addr + size); p += 4*K) {
2235      *p = '\0';
2236    }
2237  }
2238
2239  return true;
2240}
2241
// Alignment-hint variant; the hint is irrelevant on AIX, delegate directly.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
2245
// Alignment-hint variant of commit-or-exit; the hint is irrelevant on AIX.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2252
2253bool os::pd_uncommit_memory(char* addr, size_t size) {
2254  assert(is_aligned_to(addr, os::vm_page_size()),
2255    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2256    p2i(addr), os::vm_page_size());
2257  assert(is_aligned_to(size, os::vm_page_size()),
2258    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2259    size, os::vm_page_size());
2260
2261  // Dynamically do different things for mmap/shmat.
2262  const vmembk_t* const vmi = vmembk_find(addr);
2263  guarantee0(vmi);
2264  vmi->assert_is_valid_subrange(addr, size);
2265
2266  if (vmi->type == VMEM_SHMATED) {
2267    return uncommit_shmated_memory(addr, size);
2268  } else {
2269    return uncommit_mmaped_memory(addr, size);
2270  }
2271}
2272
// Not used on AIX; aborts if reached.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2278
// Not used on AIX; aborts if reached.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2284
// No-op on AIX: page-size realignment hints are not supported here.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2287
// No-op on AIX: freeing-hint is not supported here.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2290
// NUMA is not supported on AIX in this port; no-op.
void os::numa_make_global(char *addr, size_t bytes) {
}
2293
// NUMA is not supported on AIX in this port; no-op.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
2296
// NUMA stub: topology never changes because only one group is reported.
bool os::numa_topology_changed() {
  return false;
}
2300
// NUMA stub: report a single locality group.
size_t os::numa_get_groups_num() {
  return 1;
}
2304
// NUMA stub: every thread belongs to group 0.
int os::numa_get_group_id() {
  return 0;
}
2308
2309size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2310  if (size > 0) {
2311    ids[0] = 0;
2312    return 1;
2313  }
2314  return 0;
2315}
2316
// Page-info query is not supported on AIX; always reports failure.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
2320
// Page scanning is not supported on AIX; returns <end> (nothing found).
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2324
2325// Reserves and attaches a shared memory segment.
2326// Will assert if a wish address is given and could not be obtained.
2327char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2328
2329  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2330  // thereby clobbering old mappings at that place. That is probably
2331  // not intended, never used and almost certainly an error were it
2332  // ever be used this way (to try attaching at a specified address
2333  // without clobbering old mappings an alternate API exists,
2334  // os::attempt_reserve_memory_at()).
2335  // Instead of mimicking the dangerous coding of the other platforms, here I
2336  // just ignore the request address (release) or assert(debug).
2337  assert0(requested_addr == NULL);
2338
2339  // Always round to os::vm_page_size(), which may be larger than 4K.
2340  bytes = align_up(bytes, os::vm_page_size());
2341  const size_t alignment_hint0 =
2342    alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2343
2344  // In 4K mode always use mmap.
2345  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2346  if (os::vm_page_size() == 4*K) {
2347    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2348  } else {
2349    if (bytes >= Use64KPagesThreshold) {
2350      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2351    } else {
2352      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2353    }
2354  }
2355}
2356
// Release (or partially release) a range previously reserved by
// pd_reserve_memory / pd_attempt_reserve_memory_at. Returns true on success;
// the bookkeeping record is removed only when the whole block goes away.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_up(size, os::vm_page_size());
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still use
    //   page table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
    // NOTE(review): unlike the shmat path above, this subrange check only runs
    // in debug builds - confirm whether that asymmetry is intentional.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2398
2399static bool checked_mprotect(char* addr, size_t size, int prot) {
2400
2401  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2402  // not tell me if protection failed when trying to protect an un-protectable range.
2403  //
2404  // This means if the memory was allocated using shmget/shmat, protection wont work
2405  // but mprotect will still return 0:
2406  //
2407  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2408
2409  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2410
2411  if (!rc) {
2412    const char* const s_errno = os::errno_name(errno);
2413    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2414    return false;
2415  }
2416
2417  // mprotect success check
2418  //
2419  // Mprotect said it changed the protection but can I believe it?
2420  //
2421  // To be sure I need to check the protection afterwards. Try to
2422  // read from protected memory and check whether that causes a segfault.
2423  //
2424  if (!os::Aix::xpg_sus_mode()) {
2425
2426    if (CanUseSafeFetch32()) {
2427
2428      const bool read_protected =
2429        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2430         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2431
2432      if (prot & PROT_READ) {
2433        rc = !read_protected;
2434      } else {
2435        rc = read_protected;
2436      }
2437
2438      if (!rc) {
2439        if (os::Aix::on_pase()) {
2440          // There is an issue on older PASE systems where mprotect() will return success but the
2441          // memory will not be protected.
2442          // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2443          // machines; we only see it rarely, when using mprotect() to protect the guard page of
2444          // a stack. It is an OS error.
2445          //
2446          // A valid strategy is just to try again. This usually works. :-/
2447
2448          ::usleep(1000);
2449          if (::mprotect(addr, size, prot) == 0) {
2450            const bool read_protected_2 =
2451              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2452              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2453            rc = true;
2454          }
2455        }
2456      }
2457    }
2458  }
2459
2460  assert(rc == true, "mprotect failed.");
2461
2462  return rc;
2463}
2464
2465// Set protections specified
2466bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2467  unsigned int p = 0;
2468  switch (prot) {
2469  case MEM_PROT_NONE: p = PROT_NONE; break;
2470  case MEM_PROT_READ: p = PROT_READ; break;
2471  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2472  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2473  default:
2474    ShouldNotReachHere();
2475  }
2476  // is_committed is unused.
2477  return checked_mprotect(addr, size, p);
2478}
2479
// Make the range inaccessible (guard page semantics).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2483
// Restore full access (read/write/exec) to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2487
2488// Large page support
2489
2490static size_t _large_page_size = 0;
2491
// Enable large page support if OS allows that.
// Intentionally empty on AIX: 64K pages are handled by the normal
// reservation paths instead.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}
2496
// Must not be called on AIX (asserts in debug builds, returns NULL otherwise).
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}
2504
// Unimplemented on AIX (no large-page reservation, see reserve_memory_special).
bool os::release_memory_special(char* base, size_t bytes) {
  // Detaching the SHM segment will also delete it, see reserve_memory_special().
  Unimplemented();
  return false;
}
2510
// Returns the configured large page size; always 0 here (never set on AIX).
size_t os::large_page_size() {
  return _large_page_size;
}
2514
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2519
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2524
2525// Reserve memory at an arbitrary address, only if that area is
2526// available (and not reserved for something else).
2527char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2528  char* addr = NULL;
2529
2530  // Always round to os::vm_page_size(), which may be larger than 4K.
2531  bytes = align_up(bytes, os::vm_page_size());
2532
2533  // In 4K mode always use mmap.
2534  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2535  if (os::vm_page_size() == 4*K) {
2536    return reserve_mmaped_memory(bytes, requested_addr, 0);
2537  } else {
2538    if (bytes >= Use64KPagesThreshold) {
2539      return reserve_shmated_memory(bytes, requested_addr, 0);
2540    } else {
2541      return reserve_mmaped_memory(bytes, requested_addr, 0);
2542    }
2543  }
2544
2545  return addr;
2546}
2547
// Thin wrapper over read(2).
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}
2551
// Positioned read; thin wrapper over pread(2) (does not move the file offset).
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}
2555
2556void os::naked_short_sleep(jlong ms) {
2557  struct timespec req;
2558
2559  assert(ms < 1000, "Un-interruptable sleep, short time use only");
2560  req.tv_sec = 0;
2561  if (ms > 0) {
2562    req.tv_nsec = (ms % 1000) * 1000000;
2563  }
2564  else {
2565    req.tv_nsec = 1;
2566  }
2567
2568  nanosleep(&req, NULL);
2569
2570  return;
2571}
2572
2573// Sleep forever; naked call to OS-specific sleep; use with CAUTION
// Sleep forever; naked call to OS-specific sleep; use with CAUTION.
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}
2579
2580// Used to convert frequent JVM_Yield() to nops
// Used to convert frequent JVM_Yield() to nops; controlled by -XX:+DontYieldALot.
bool os::dont_yield() {
  return DontYieldALot;
}
2584
// Yield the CPU without any VM state transitions; wraps sched_yield(2).
void os::naked_yield() {
  sched_yield();
}
2588
2589////////////////////////////////////////////////////////////////////////////////
2590// thread priority support
2591
2592// From AIX manpage to pthread_setschedparam
2593// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2594//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2595//
2596// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2597// range from 40 to 80, where 40 is the least favored priority and 80
2598// is the most favored."
2599//
2600// (Actually, I doubt this even has an impact on AIX, as we do kernel
2601// scheduling there; however, this still leaves iSeries.)
2602//
2603// We use the same values for AIX and PASE.
// Index is the Java priority (0..CriticalPriority), value is the
// SCHED_OTHER priority passed to pthread_setschedparam() (legal AIX
// range 40..80, see comment block above).
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2623
2624OSReturn os::set_native_priority(Thread* thread, int newpri) {
2625  if (!UseThreadPriorities) return OS_OK;
2626  pthread_t thr = thread->osthread()->pthread_id();
2627  int policy = SCHED_OTHER;
2628  struct sched_param param;
2629  param.sched_priority = newpri;
2630  int ret = pthread_setschedparam(thr, policy, &param);
2631
2632  if (ret != 0) {
2633    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2634        (int)thr, newpri, ret, os::errno_name(ret));
2635  }
2636  return (ret == 0) ? OS_OK : OS_ERR;
2637}
2638
2639OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2640  if (!UseThreadPriorities) {
2641    *priority_ptr = java_to_os_priority[NormPriority];
2642    return OS_OK;
2643  }
2644  pthread_t thr = thread->osthread()->pthread_id();
2645  int policy = SCHED_OTHER;
2646  struct sched_param param;
2647  int ret = pthread_getschedparam(thr, &policy, &param);
2648  *priority_ptr = param.sched_priority;
2649
2650  return (ret == 0) ? OS_OK : OS_ERR;
2651}
2652
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Intentionally a no-op on AIX.
void os::hint_no_preempt() {}
2656
2657////////////////////////////////////////////////////////////////////////////////
2658// suspend/resume support
2659
2660//  the low-level signal-based suspend/resume support is a remnant from the
2661//  old VM-suspension that used to be for java-suspension, safepoints etc,
2662//  within hotspot. Now there is a single use-case for this:
2663//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2664//      that runs in the watcher thread.
2665//  The remaining code is greatly simplified from the more general suspension
2666//  code that used to be used.
2667//
2668//  The protocol is quite simple:
2669//  - suspend:
2670//      - sends a signal to the target thread
2671//      - polls the suspend state of the osthread using a yield loop
2672//      - target thread signal handler (SR_handler) sets suspend state
2673//        and blocks in sigsuspend until continued
2674//  - resume:
2675//      - sets target osthread state to continue
2676//      - sends signal to end the sigsuspend loop in the SR_handler
2677//
2678//  Note that the SR_lock plays no role in this suspend/resume protocol,
2679//  but is checked for NULL in SR_handler as a thread termination indicator.
2680//
2681
2682static void resume_clear_context(OSThread *osthread) {
2683  osthread->set_ucontext(NULL);
2684  osthread->set_siginfo(NULL);
2685}
2686
2687static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2688  osthread->set_ucontext(context);
2689  osthread->set_siginfo(siginfo);
2690}
2691
2692//
2693// Handler function invoked when a thread's execution is suspended or
2694// resumed. We have to be careful that only async-safe functions are
2695// called here (Note: most pthread functions are not async safe and
2696// should be avoided.)
2697//
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2703//
2704// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2705//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current_or_null_safe();
  assert(thread != NULL, "Missing current thread in SR_handler");

  // On some systems we have seen signal delivery get "stuck" until the signal
  // mask is changed as part of thread termination. Check that the current thread
  // has not already terminated (via SR_lock()) - else the following assertion
  // will fail because the thread is no longer a JavaThread as the ~JavaThread
  // destructor has completed.

  if (thread->SR_lock() == NULL) {
    return;
  }

  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  OSThread* osthread = thread->osthread();

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    // Record siginfo/ucontext so the suspended thread's state can be
    // inspected (e.g. get_thread_pc()); cleared by resume_clear_context()
    // before we return.
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // sigsuspend returned: either SR_signum was delivered again
        // (resume, state flips to SR_RUNNING) or some unrelated signal
        // woke us - in the latter case keep waiting.
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2768
// One-time setup of the suspend/resume signal machinery: picks the signal
// number (overridable via the _JAVA_SR_SIGNUM environment variable),
// installs SR_handler for it and records the sa_flags used, for later
// diagnostics. Returns 0 on success, -1 if sigaction() fails.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): SA_SIGINFO is set but the handler is stored through
  // sa_handler with a cast (SR_handler has the three-argument
  // sa_sigaction-style signature). Strictly, sa_sigaction is the field
  // that matches SA_SIGINFO - confirm before changing, as the other
  // POSIX ports do the same.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  // (Queries the currently blocked set into act.sa_mask; SIG_BLOCK with a
  // NULL 'set' leaves the mask unchanged.)
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2805
// Counterpart to SR_initialize(); currently there is nothing to tear down.
static int SR_finalize() {
  return 0;
}
2809
// Sends SR_signum to the target thread to drive the suspend/resume
// protocol. Returns the pthread_kill() status (0 on success).
static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}
2815
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume.
// RANDOMLY_LARGE_INTEGER bounds the outer retry loops in
// do_suspend()/do_resume(); RANDOMLY_LARGE_INTEGER2 bounds the inner
// yield loop in do_suspend().
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
2821
2822// returns true on success and false on error - really an error is fatal
2823// but this seems the normal response to library errors
// Suspends the target thread: flips its SuspendResume state to
// SUSPEND_REQUEST, signals it (sr_notify) and spins/yields until the
// target's SR_handler has acknowledged the suspension. Returns true iff
// the thread ended up suspended; on a failed notify or timeout the
// request is cancelled and false returned (unless the thread managed to
// suspend concurrently anyway).
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    // Inner loop: burn a bounded number of yields before re-checking the
    // timeout condition.
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2874
// Resumes a thread previously suspended via do_suspend(): flips the state
// to WAKEUP_REQUEST, then keeps signalling the target until its SR_handler
// leaves sigsuspend and the state reads SR_RUNNING.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      // Signal sent - give the target a (bounded) chance to run its
      // handler before re-sending.
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2898
2899///////////////////////////////////////////////////////////////////////////////////
2900// signal handling (except suspend/resume)
2901
2902// This routine may be used by user applications as a "hook" to catch signals.
2903// The user-defined signal handler must pass unrecognized signals to this
2904// routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
2908//
2909// If this routine returns false, it is OK to call it again. This allows
2910// the user-defined signal handler to perform checks either before or after
2911// the VM performs its own checks. Naturally, the user code would be making
2912// a serious error if it tried to handle an exception (such as a null check
2913// or breakpoint) that the VM was generating for its own correct operation.
2914//
2915// This routine may recognize any of the following kinds of signals:
2916//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2917// It should be consulted by handlers for any of those signals.
2918//
2919// The caller of this routine must pass in the three arguments supplied
2920// to the function referred to in the "sa_sigaction" (not the "sa_handler")
2921// field of the structure passed to sigaction(). This routine assumes that
2922// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2923//
2924// Note that the VM will print warnings if it detects conflicting signal
2925// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2926//
2927extern "C" JNIEXPORT int
2928JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2929
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same.)
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  // Error reporting differs between the candidates: pthread_sigmask
  // returns the error number directly, while sigthreadmask returns -1 and
  // sets the global errno - so pthread_sigmask is the more thread-safe
  // choice for error handling. Success is 0 either way.
  const int rc = ::pthread_sigmask(how, set, oset);
  return rc == 0;
}
2941
2942// Function to unblock all signals which are, according
2943// to POSIX, typical program error signals. If they happen while being blocked,
2944// they typically will bring down the process immediately.
2945bool unblock_program_error_signals() {
2946  sigset_t set;
2947  ::sigemptyset(&set);
2948  ::sigaddset(&set, SIGILL);
2949  ::sigaddset(&set, SIGBUS);
2950  ::sigaddset(&set, SIGFPE);
2951  ::sigaddset(&set, SIGSEGV);
2952  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2953}
2954
2955// Renamed from 'signalHandler' to avoid collision with other shared libs.
// Handler installed by os::Aix::set_signal_handler() for the VM's error
// signals; delegates the real work to JVM_handle_aix_signal().
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_aix_signal(sig, info, uc, true);
  errno = orig_errno;
}
2968
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// sigact[sig] holds the pre-existing handler saved for 'sig'; membership
// of 'sig' in 'sigs' marks which entries are valid
// (see save_preinstalled_handler()/get_preinstalled_handler()).
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
// Resolved to libjsig's JVM_get_signal_action in install_signal_handlers().
get_signal_t os::Aix::get_signal_action = NULL;
2979
2980struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2981  struct sigaction *actp = NULL;
2982
2983  if (libjsig_is_loaded) {
2984    // Retrieve the old signal handler from libjsig
2985    actp = (*get_signal_action)(sig);
2986  }
2987  if (actp == NULL) {
2988    // Retrieve the preinstalled signal handler from jvm
2989    actp = get_preinstalled_handler(sig);
2990  }
2991
2992  return actp;
2993}
2994
// Invokes the previously saved third-party handler 'actp' for 'sig',
// emulating what the kernel would do for a handler installed via
// sigaction(): honors SA_SIGINFO/SA_NODEFER/SA_RESETHAND and applies
// sa_mask for the duration of the call. Returns true if the signal is
// considered handled; false for SIG_DFL so the VM can treat the signal
// as an unexpected exception instead of taking the default action.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
3039
3040bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3041  bool chained = false;
3042  // signal-chaining
3043  if (UseSignalChaining) {
3044    struct sigaction *actp = get_chained_signal_action(sig);
3045    if (actp != NULL) {
3046      chained = call_chained_handler(actp, sig, siginfo, context);
3047    }
3048  }
3049  return chained;
3050}
3051
3052size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
3053  // Creating guard page is very expensive. Java thread has HotSpot
3054  // guard pages, only enable glibc guard page for non-Java threads.
3055  // (Remember: compiler thread is a Java thread, too!)
3056  //
3057  // Aix can have different page sizes for stack (4K) and heap (64K).
3058  // As Hotspot knows only one page size, we assume the stack has
3059  // the same page size as the heap. Returning page_size() here can
3060  // cause 16 guard pages which we want to avoid.  Thus we return 4K
3061  // which will be rounded to the real page size by the OS.
3062  return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
3063}
3064
3065struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3066  if (sigismember(&sigs, sig)) {
3067    return &sigact[sig];
3068  }
3069  return NULL;
3070}
3071
// Remembers a pre-existing third-party handler for 'sig' so it can be
// chained later (see get_preinstalled_handler()/chained_handler()).
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  // Mark the slot valid only after the handler has been stored.
  sigaddset(&sigs, sig);
}
3077
// for diagnostic
// Records the sa_flags the VM installed for each signal so that
// check_signal_handler()/print_signal_handler() can detect later changes.
int sigflags[NSIG];
3080
// Returns the sa_flags recorded via set_our_sigflags() for 'sig'
// (0 if never set - sigflags[] has static storage).
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}
3085
// Records the sa_flags the VM installed for 'sig' (for later diagnostics).
void os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  // The range check is repeated outside the assert so product builds
  // (where assert compiles away) are protected as well.
  if (sig > 0 && sig < NSIG) {
    sigflags[sig] = flags;
  }
}
3092
// Installs the VM's handler for 'sig' (set_installed == true) or restores
// SIG_DFL (false). A pre-existing third-party handler is: left alone if
// AllowUserSignalHandlers is set (or when uninstalling), saved for
// chaining if UseSignalChaining is set, otherwise fatal.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  // Block all signals while our handler runs.
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // The handler reported back by this sigaction() call must match the one
  // we probed at function entry - otherwise someone installed a handler
  // concurrently.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3139
3140// install signal handlers for signals that HotSpot needs to
3141// handle in order to support Java-level exception handling.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    // Probe for libjsig's bracketing functions; finding them means the
    // interposition library is loaded into this process.
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      // NOTE(review): end_signal_setting is not individually NULL-checked
      // before the call further below; this assumes libjsig always exports
      // all three JVM_* symbols together - confirm.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3196
// Formats 'handler' into 'buf' for diagnostic output: the basename of the
// containing library when it can be resolved, the raw pointer value
// otherwise. Returns 'buf'.
static const char* get_signal_handler_name(address handler,
                                           char* buf, int buflen) {
  int offset;
  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    size_t len = strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    // The way os::dll_address_to_library_name is implemented on Aix
    // right now, it always returns -1 for the offset which is not
    // terribly informative.
    // Will fix that. For now, omit the offset.
    jio_snprintf(buf, buflen, "%s", p1);
  } else {
    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  }
  return buf;
}
3217
// Prints one line describing the currently installed handler for 'sig':
// its name/address, sa_mask and sa_flags, plus a warning if the VM's own
// handler has had its flags changed behind its back.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // The handler may have been reset by VMError; if so, report the
  // original handler and flags instead.
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3264
// Invoke check_signal_handler(sig) unless this signal has already been
// flagged (i.e. is a member of check_signal_done).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3268
3269// This method is a periodic task to check for misbehaving JNI applications
3270// under CheckJNI, we can add any periodic checks here
3271
void os::run_periodic_checks() {

  // Signal checking may have been disabled in install_signal_handlers()
  // (libjsig loaded or AllowUserSignalHandlers).
  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  // The SIGTRAP handler is only relevant when UseSIGTRAP is set.
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
}
3301
// Signature of the libc sigaction() function; resolved lazily via dlsym in
// check_signal_handler() so that any interposed (e.g. libjsig) version is
// bypassed.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

static os_sigaction_t os_sigaction = NULL;
3305
// Verifies that the handler currently installed for 'sig' is still the one
// the VM (or, for shutdown/break signals, the user handler) expects.
// Prints a warning on a mismatch and stops checking that signal.
// Called from os::run_periodic_checks().
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler we expect for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    // Handler matches but someone changed the flags we installed.
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3377
3378// To install functions for atexit system call
extern "C" {
  // atexit() needs a plain C function pointer; this forwards to
  // perfMemory_exit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3384
3385// This is called _before_ the most of global arguments have been parsed.
3386void os::init(void) {
3387  // This is basic, we want to know if that ever changes.
3388  // (Shared memory boundary is supposed to be a 256M aligned.)
3389  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3390
3391  // Record process break at startup.
3392  g_brk_at_startup = (address) ::sbrk(0);
3393  assert(g_brk_at_startup != (address) -1, "sbrk failed");
3394
3395  // First off, we need to know whether we run on AIX or PASE, and
3396  // the OS level we run on.
3397  os::Aix::initialize_os_info();
3398
3399  // Scan environment (SPEC1170 behaviour, etc).
3400  os::Aix::scan_environment();
3401
3402  // Probe multipage support.
3403  query_multipage_support();
3404
3405  // Act like we only have one page size by eliminating corner cases which
3406  // we did not support very well anyway.
3407  // We have two input conditions:
3408  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3409  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3410  //    setting.
3411  //    Data segment page size is important for us because it defines the thread stack page
3412  //    size, which is needed for guard page handling, stack banging etc.
3413  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3414  //    and should be allocated with 64k pages.
3415  //
3416  // So, we do the following:
3417  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3418  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3419  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3420  // 64k          no              --- AIX 5.2 ? ---
3421  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3422
3423  // We explicitly leave no option to change page size, because only upgrading would work,
3424  // not downgrading (if stack page size is 64k you cannot pretend its 4k).
3425
3426  if (g_multipage_support.datapsize == 4*K) {
3427    // datapsize = 4K. Data segment, thread stacks are 4K paged.
3428    if (g_multipage_support.can_use_64K_pages) {
3429      // .. but we are able to use 64K pages dynamically.
3430      // This would be typical for java launchers which are not linked
3431      // with datapsize=64K (like, any other launcher but our own).
3432      //
3433      // In this case it would be smart to allocate the java heap with 64K
3434      // to get the performance benefit, and to fake 64k pages for the
3435      // data segment (when dealing with thread stacks).
3436      //
3437      // However, leave a possibility to downgrade to 4K, using
3438      // -XX:-Use64KPages.
3439      if (Use64KPages) {
3440        trcVerbose("64K page mode (faked for data segment)");
3441        Aix::_page_size = 64*K;
3442      } else {
3443        trcVerbose("4K page mode (Use64KPages=off)");
3444        Aix::_page_size = 4*K;
3445      }
3446    } else {
3447      // .. and not able to allocate 64k pages dynamically. Here, just
3448      // fall back to 4K paged mode and use mmap for everything.
3449      trcVerbose("4K page mode");
3450      Aix::_page_size = 4*K;
3451      FLAG_SET_ERGO(bool, Use64KPages, false);
3452    }
3453  } else {
3454    // datapsize = 64k. Data segment, thread stacks are 64k paged.
3455    // This normally means that we can allocate 64k pages dynamically.
3456    // (There is one special case where this may be false: EXTSHM=on.
3457    // but we decided to not support that mode).
3458    assert0(g_multipage_support.can_use_64K_pages);
3459    Aix::_page_size = 64*K;
3460    trcVerbose("64K page mode");
3461    FLAG_SET_ERGO(bool, Use64KPages, true);
3462  }
3463
3464  // For now UseLargePages is just ignored.
3465  FLAG_SET_ERGO(bool, UseLargePages, false);
3466  _page_sizes[0] = 0;
3467
3468  // debug trace
3469  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));
3470
3471  // Next, we need to initialize libo4 and libperfstat libraries.
3472  if (os::Aix::on_pase()) {
3473    os::Aix::initialize_libo4();
3474  } else {
3475    os::Aix::initialize_libperfstat();
3476  }
3477
3478  // Reset the perfstat information provided by ODM.
3479  if (os::Aix::on_aix()) {
3480    libperfstat::perfstat_reset();
3481  }
3482
3483  // Now initialze basic system properties. Note that for some of the values we
3484  // need libperfstat etc.
3485  os::Aix::initialize_system_info();
3486
3487  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3488
3489  init_random(1234567);
3490
3491  ThreadCritical::initialize();
3492
3493  // Main_thread points to the aboriginal thread.
3494  Aix::_main_thread = pthread_self();
3495
3496  initial_time_count = os::elapsed_counter();
3497
3498  os::Posix::init();
3499}
3500
3501// This is called _after_ the global arguments have been parsed.
3502jint os::init_2(void) {
3503
3504  os::Posix::init_2();
3505
3506  if (os::Aix::on_pase()) {
3507    trcVerbose("Running on PASE.");
3508  } else {
3509    trcVerbose("Running on AIX (not PASE).");
3510  }
3511
3512  trcVerbose("processor count: %d", os::_processor_count);
3513  trcVerbose("physical memory: %lu", Aix::_physical_memory);
3514
3515  // Initially build up the loaded dll map.
3516  LoadedLibraries::reload();
3517  if (Verbose) {
3518    trcVerbose("Loaded Libraries: ");
3519    LoadedLibraries::print(tty);
3520  }
3521
3522  const int page_size = Aix::page_size();
3523  const int map_size = page_size;
3524
3525  address map_address = (address) MAP_FAILED;
3526  const int prot  = PROT_READ;
3527  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3528
3529  // Use optimized addresses for the polling page,
3530  // e.g. map it to a special 32-bit address.
3531  if (OptimizePollingPageLocation) {
3532    // architecture-specific list of address wishes:
3533    address address_wishes[] = {
3534      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3535      // PPC64: all address wishes are non-negative 32 bit values where
3536      // the lower 16 bits are all zero. we can load these addresses
3537      // with a single ppc_lis instruction.
3538      (address) 0x30000000, (address) 0x31000000,
3539      (address) 0x32000000, (address) 0x33000000,
3540      (address) 0x40000000, (address) 0x41000000,
3541      (address) 0x42000000, (address) 0x43000000,
3542      (address) 0x50000000, (address) 0x51000000,
3543      (address) 0x52000000, (address) 0x53000000,
3544      (address) 0x60000000, (address) 0x61000000,
3545      (address) 0x62000000, (address) 0x63000000
3546    };
3547    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3548
3549    // iterate over the list of address wishes:
3550    for (int i=0; i<address_wishes_length; i++) {
3551      // Try to map with current address wish.
3552      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3553      // fail if the address is already mapped.
3554      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3555                                     map_size, prot,
3556                                     flags | MAP_FIXED,
3557                                     -1, 0);
3558      trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3559                   address_wishes[i], map_address + (ssize_t)page_size);
3560
3561      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3562        // Map succeeded and map_address is at wished address, exit loop.
3563        break;
3564      }
3565
3566      if (map_address != (address) MAP_FAILED) {
3567        // Map succeeded, but polling_page is not at wished address, unmap and continue.
3568        ::munmap(map_address, map_size);
3569        map_address = (address) MAP_FAILED;
3570      }
3571      // Map failed, continue loop.
3572    }
3573  } // end OptimizePollingPageLocation
3574
3575  if (map_address == (address) MAP_FAILED) {
3576    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3577  }
3578  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3579  os::set_polling_page(map_address);
3580
3581  if (!UseMembar) {
3582    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3583    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3584    os::set_memory_serialize_page(mem_serialize_page);
3585
3586    trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3587        mem_serialize_page, mem_serialize_page + Aix::page_size(),
3588        Aix::page_size(), Aix::page_size());
3589  }
3590
3591  // initialize suspend/resume support - must do this before signal_sets_init()
3592  if (SR_initialize() != 0) {
3593    perror("SR_initialize failed");
3594    return JNI_ERR;
3595  }
3596
3597  Aix::signal_sets_init();
3598  Aix::install_signal_handlers();
3599
3600  // Check and sets minimum stack sizes against command line options
3601  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
3602    return JNI_ERR;
3603  }
3604
3605  if (UseNUMA) {
3606    UseNUMA = false;
3607    warning("NUMA optimizations are not available on this OS.");
3608  }
3609
3610  if (MaxFDLimit) {
3611    // Set the number of file descriptors to max. print out error
3612    // if getrlimit/setrlimit fails but continue regardless.
3613    struct rlimit nbr_files;
3614    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3615    if (status != 0) {
3616      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3617    } else {
3618      nbr_files.rlim_cur = nbr_files.rlim_max;
3619      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3620      if (status != 0) {
3621        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3622      }
3623    }
3624  }
3625
3626  if (PerfAllowAtExitRegistration) {
3627    // Only register atexit functions if PerfAllowAtExitRegistration is set.
3628    // At exit functions can be delayed until process exit time, which
3629    // can be problematic for embedded VM situations. Embedded VMs should
3630    // call DestroyJavaVM() to assure that VM resources are released.
3631
3632    // Note: perfMemory_exit_helper atexit function may be removed in
3633    // the future if the appropriate cleanup code can be added to the
3634    // VM_Exit VMOperation's doit method.
3635    if (atexit(perfMemory_exit_helper) != 0) {
3636      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3637    }
3638  }
3639
3640  return JNI_OK;
3641}
3642
3643// Mark the polling page as unreadable
3644void os::make_polling_page_unreadable(void) {
3645  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3646    fatal("Could not disable polling page");
3647  }
3648};
3649
3650// Mark the polling page as readable
3651void os::make_polling_page_readable(void) {
3652  // Changed according to os_linux.cpp.
3653  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3654    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3655  }
3656};
3657
3658int os::active_processor_count() {
3659  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3660  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3661  return online_cpus;
3662}
3663
3664void os::set_native_thread_name(const char *name) {
3665  // Not yet implemented.
3666  return;
3667}
3668
3669bool os::distribute_processes(uint length, uint* distribution) {
3670  // Not yet implemented.
3671  return false;
3672}
3673
3674bool os::bind_to_processor(uint processor_id) {
3675  // Not yet implemented.
3676  return false;
3677}
3678
3679void os::SuspendedThreadTask::internal_do_task() {
3680  if (do_suspend(_thread->osthread())) {
3681    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3682    do_task(context);
3683    do_resume(_thread->osthread());
3684  }
3685}
3686
// Helper task that suspends a target thread just long enough to sample
// its program counter. Used by os::get_thread_pc() below.
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // Returns the sampled PC; may only be called after the task completed.
  ExtendedPC result();
protected:
  // Callback invoked while the target thread is suspended; captures the PC.
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc;
};
3696
3697ExtendedPC PcFetcher::result() {
3698  guarantee(is_done(), "task is not done yet.");
3699  return _epc;
3700}
3701
3702void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3703  Thread* thread = context.thread();
3704  OSThread* osthread = thread->osthread();
3705  if (osthread->ucontext() != NULL) {
3706    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
3707  } else {
3708    // NULL context is unexpected, double-check this is the VMThread.
3709    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3710  }
3711}
3712
3713// Suspends the target using the signal mechanism and then grabs the PC before
3714// resuming the target. Used by the flat-profiler only
3715ExtendedPC os::get_thread_pc(Thread* thread) {
3716  // Make sure that it is called by the watcher for the VMThread.
3717  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3718  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3719
3720  PcFetcher fetcher(thread);
3721  fetcher.run();
3722  return fetcher.result();
3723}
3724
3725////////////////////////////////////////////////////////////////////////////////
3726// debug support
3727
3728bool os::find(address addr, outputStream* st) {
3729
3730  st->print(PTR_FORMAT ": ", addr);
3731
3732  loaded_module_t lm;
3733  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3734      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3735    st->print_cr("%s", lm.path);
3736    return true;
3737  }
3738
3739  return false;
3740}
3741
3742////////////////////////////////////////////////////////////////////////////////
3743// misc
3744
// This does not do anything on Aix. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, Thread* thread) {
  // No platform-specific wrapping is needed here; invoke the call directly.
  f(value, method, args, thread);
}
3753
// No platform-specific statistics to report on AIX.
void os::print_statistics() {
}
3756
3757bool os::message_box(const char* title, const char* message) {
3758  int i;
3759  fdStream err(defaultStream::error_fd());
3760  for (i = 0; i < 78; i++) err.print_raw("=");
3761  err.cr();
3762  err.print_raw_cr(title);
3763  for (i = 0; i < 78; i++) err.print_raw("-");
3764  err.cr();
3765  err.print_raw_cr(message);
3766  for (i = 0; i < 78; i++) err.print_raw("=");
3767  err.cr();
3768
3769  char buf[16];
3770  // Prevent process from exiting upon "read error" without consuming all CPU
3771  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3772
3773  return buf[0] == 'y' || buf[0] == 'Y';
3774}
3775
3776int os::stat(const char *path, struct stat *sbuf) {
3777  char pathbuf[MAX_PATH];
3778  if (strlen(path) > MAX_PATH - 1) {
3779    errno = ENAMETOOLONG;
3780    return -1;
3781  }
3782  os::native_path(strcpy(pathbuf, path));
3783  return ::stat(pathbuf, sbuf);
3784}
3785
3786// Is a (classpath) directory empty?
3787bool os::dir_is_empty(const char* path) {
3788  DIR *dir = NULL;
3789  struct dirent *ptr;
3790
3791  dir = opendir(path);
3792  if (dir == NULL) return true;
3793
3794  /* Scan the directory */
3795  bool result = true;
3796  char buf[sizeof(struct dirent) + MAX_PATH];
3797  while (result && (ptr = ::readdir(dir)) != NULL) {
3798    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3799      result = false;
3800    }
3801  }
3802  closedir(dir);
3803  return result;
3804}
3805
3806// This code originates from JDK's sysOpen and open64_w
3807// from src/solaris/hpi/src/system_md.c
3808
3809int os::open(const char *path, int oflag, int mode) {
3810
3811  if (strlen(path) > MAX_PATH - 1) {
3812    errno = ENAMETOOLONG;
3813    return -1;
3814  }
3815  int fd;
3816
3817  fd = ::open64(path, oflag, mode);
3818  if (fd == -1) return -1;
3819
3820  // If the open succeeded, the file might still be a directory.
3821  {
3822    struct stat64 buf64;
3823    int ret = ::fstat64(fd, &buf64);
3824    int st_mode = buf64.st_mode;
3825
3826    if (ret != -1) {
3827      if ((st_mode & S_IFMT) == S_IFDIR) {
3828        errno = EISDIR;
3829        ::close(fd);
3830        return -1;
3831      }
3832    } else {
3833      ::close(fd);
3834      return -1;
3835    }
3836  }
3837
3838  // All file descriptors that are opened in the JVM and not
3839  // specifically destined for a subprocess should have the
3840  // close-on-exec flag set. If we don't set it, then careless 3rd
3841  // party native code might fork and exec without closing all
3842  // appropriate file descriptors (e.g. as we do in closeDescriptors in
3843  // UNIXProcess.c), and this in turn might:
3844  //
3845  // - cause end-of-file to fail to be detected on some file
3846  //   descriptors, resulting in mysterious hangs, or
3847  //
3848  // - might cause an fopen in the subprocess to fail on a system
3849  //   suffering from bug 1085341.
3850  //
3851  // (Yes, the default setting of the close-on-exec flag is a Unix
3852  // design flaw.)
3853  //
3854  // See:
3855  // 1085341: 32-bit stdio routines should support file descriptors >255
3856  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3857  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3858#ifdef FD_CLOEXEC
3859  {
3860    int flags = ::fcntl(fd, F_GETFD);
3861    if (flags != -1)
3862      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3863  }
3864#endif
3865
3866  return fd;
3867}
3868
3869// create binary file, rewriting existing file if required
3870int os::create_binary_file(const char* path, bool rewrite_existing) {
3871  int oflags = O_WRONLY | O_CREAT;
3872  if (!rewrite_existing) {
3873    oflags |= O_EXCL;
3874  }
3875  return ::open64(path, oflags, S_IREAD | S_IWRITE);
3876}
3877
3878// return current position of file pointer
3879jlong os::current_file_offset(int fd) {
3880  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3881}
3882
3883// move file pointer to the specified offset
3884jlong os::seek_to_file_offset(int fd, jlong offset) {
3885  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3886}
3887
3888// This code originates from JDK's sysAvailable
3889// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3890
3891int os::available(int fd, jlong *bytes) {
3892  jlong cur, end;
3893  int mode;
3894  struct stat64 buf64;
3895
3896  if (::fstat64(fd, &buf64) >= 0) {
3897    mode = buf64.st_mode;
3898    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3899      int n;
3900      if (::ioctl(fd, FIONREAD, &n) >= 0) {
3901        *bytes = n;
3902        return 1;
3903      }
3904    }
3905  }
3906  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3907    return 0;
3908  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3909    return 0;
3910  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3911    return 0;
3912  }
3913  *bytes = end - cur;
3914  return 1;
3915}
3916
3917// Map a block of memory.
3918char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3919                        char *addr, size_t bytes, bool read_only,
3920                        bool allow_exec) {
3921  int prot;
3922  int flags = MAP_PRIVATE;
3923
3924  if (read_only) {
3925    prot = PROT_READ;
3926    flags = MAP_SHARED;
3927  } else {
3928    prot = PROT_READ | PROT_WRITE;
3929    flags = MAP_PRIVATE;
3930  }
3931
3932  if (allow_exec) {
3933    prot |= PROT_EXEC;
3934  }
3935
3936  if (addr != NULL) {
3937    flags |= MAP_FIXED;
3938  }
3939
3940  // Allow anonymous mappings if 'fd' is -1.
3941  if (fd == -1) {
3942    flags |= MAP_ANONYMOUS;
3943  }
3944
3945  char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3946                                     fd, file_offset);
3947  if (mapped_address == MAP_FAILED) {
3948    return NULL;
3949  }
3950  return mapped_address;
3951}
3952
3953// Remap a block of memory.
3954char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3955                          char *addr, size_t bytes, bool read_only,
3956                          bool allow_exec) {
3957  // same as map_memory() on this OS
3958  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3959                        allow_exec);
3960}
3961
3962// Unmap a block of memory.
3963bool os::pd_unmap_memory(char* addr, size_t bytes) {
3964  return munmap(addr, bytes) == 0;
3965}
3966
3967// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3968// are used by JVM M&M and JVMTI to get user+sys or user CPU time
3969// of a thread.
3970//
3971// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3972// the fast estimate available on the platform.
3973
3974jlong os::current_thread_cpu_time() {
3975  // return user + sys since the cost is the same
3976  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3977  assert(n >= 0, "negative CPU time");
3978  return n;
3979}
3980
3981jlong os::thread_cpu_time(Thread* thread) {
3982  // consistent with what current_thread_cpu_time() returns
3983  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
3984  assert(n >= 0, "negative CPU time");
3985  return n;
3986}
3987
3988jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
3989  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
3990  assert(n >= 0, "negative CPU time");
3991  return n;
3992}
3993
3994static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3995  bool error = false;
3996
3997  jlong sys_time = 0;
3998  jlong user_time = 0;
3999
4000  // Reimplemented using getthrds64().
4001  //
4002  // Works like this:
4003  // For the thread in question, get the kernel thread id. Then get the
4004  // kernel thread statistics using that id.
4005  //
4006  // This only works of course when no pthread scheduling is used,
4007  // i.e. there is a 1:1 relationship to kernel threads.
4008  // On AIX, see AIXTHREAD_SCOPE variable.
4009
4010  pthread_t pthtid = thread->osthread()->pthread_id();
4011
4012  // retrieve kernel thread id for the pthread:
4013  tid64_t tid = 0;
4014  struct __pthrdsinfo pinfo;
4015  // I just love those otherworldly IBM APIs which force me to hand down
4016  // dummy buffers for stuff I dont care for...
4017  char dummy[1];
4018  int dummy_size = sizeof(dummy);
4019  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4020                          dummy, &dummy_size) == 0) {
4021    tid = pinfo.__pi_tid;
4022  } else {
4023    tty->print_cr("pthread_getthrds_np failed.");
4024    error = true;
4025  }
4026
4027  // retrieve kernel timing info for that kernel thread
4028  if (!error) {
4029    struct thrdentry64 thrdentry;
4030    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4031      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4032      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4033    } else {
4034      tty->print_cr("pthread_getthrds_np failed.");
4035      error = true;
4036    }
4037  }
4038
4039  if (p_sys_time) {
4040    *p_sys_time = sys_time;
4041  }
4042
4043  if (p_user_time) {
4044    *p_user_time = user_time;
4045  }
4046
4047  if (error) {
4048    return false;
4049  }
4050
4051  return true;
4052}
4053
4054jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4055  jlong sys_time;
4056  jlong user_time;
4057
4058  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4059    return -1;
4060  }
4061
4062  return user_sys_cpu_time ? sys_time + user_time : user_time;
4063}
4064
4065void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4066  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4067  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4068  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4069  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4070}
4071
4072void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4073  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4074  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4075  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4076  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4077}
4078
// Per-thread CPU time is supported (implemented via getthrds64(), see
// thread_cpu_time_unchecked() above).
bool os::is_thread_cpu_time_supported() {
  return true;
}
4082
4083// System loadavg support. Returns -1 if load average cannot be obtained.
4084// For now just return the system wide load average (no processor sets).
4085int os::loadavg(double values[], int nelem) {
4086
4087  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4088  guarantee(values, "argument error");
4089
4090  if (os::Aix::on_pase()) {
4091
4092    // AS/400 PASE: use libo4 porting library
4093    double v[3] = { 0.0, 0.0, 0.0 };
4094
4095    if (libo4::get_load_avg(v, v + 1, v + 2)) {
4096      for (int i = 0; i < nelem; i ++) {
4097        values[i] = v[i];
4098      }
4099      return nelem;
4100    } else {
4101      return -1;
4102    }
4103
4104  } else {
4105
4106    // AIX: use libperfstat
4107    libperfstat::cpuinfo_t ci;
4108    if (libperfstat::get_cpuinfo(&ci)) {
4109      for (int i = 0; i < nelem; i++) {
4110        values[i] = ci.loadavg[i];
4111      }
4112    } else {
4113      return -1;
4114    }
4115    return nelem;
4116  }
4117}
4118
4119void os::pause() {
4120  char filename[MAX_PATH];
4121  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4122    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4123  } else {
4124    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4125  }
4126
4127  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4128  if (fd != -1) {
4129    struct stat buf;
4130    ::close(fd);
4131    while (::stat(filename, &buf) == 0) {
4132      (void)::poll(NULL, 0, 100);
4133    }
4134  } else {
4135    trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4136  }
4137}
4138
4139bool os::Aix::is_primordial_thread() {
4140  if (pthread_self() == (pthread_t)1) {
4141    return true;
4142  } else {
4143    return false;
4144  }
4145}
4146
4147// OS recognitions (PASE/AIX, OS level) call this before calling any
4148// one of Aix::on_pase(), Aix::os_version() static
4149void os::Aix::initialize_os_info() {
4150
4151  assert(_on_pase == -1 && _os_version == 0, "already called.");
4152
4153  struct utsname uts;
4154  memset(&uts, 0, sizeof(uts));
4155  strcpy(uts.sysname, "?");
4156  if (::uname(&uts) == -1) {
4157    trcVerbose("uname failed (%d)", errno);
4158    guarantee(0, "Could not determine whether we run on AIX or PASE");
4159  } else {
4160    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4161               "node \"%s\" machine \"%s\"\n",
4162               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4163    const int major = atoi(uts.version);
4164    assert(major > 0, "invalid OS version");
4165    const int minor = atoi(uts.release);
4166    assert(minor > 0, "invalid OS release");
4167    _os_version = (major << 24) | (minor << 16);
4168    char ver_str[20] = {0};
4169    char *name_str = "unknown OS";
4170    if (strcmp(uts.sysname, "OS400") == 0) {
4171      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4172      _on_pase = 1;
4173      if (os_version_short() < 0x0504) {
4174        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4175        assert(false, "OS/400 release too old.");
4176      }
4177      name_str = "OS/400 (pase)";
4178      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4179    } else if (strcmp(uts.sysname, "AIX") == 0) {
4180      // We run on AIX. We do not support versions older than AIX 5.3.
4181      _on_pase = 0;
4182      // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4183      odmWrapper::determine_os_kernel_version(&_os_version);
4184      if (os_version_short() < 0x0503) {
4185        trcVerbose("AIX release older than AIX 5.3 not supported.");
4186        assert(false, "AIX release too old.");
4187      }
4188      name_str = "AIX";
4189      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4190                   major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4191    } else {
4192      assert(false, name_str);
4193    }
4194    trcVerbose("We run on %s %s", name_str, ver_str);
4195  }
4196
4197  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4198} // end: os::Aix::initialize_os_info()
4199
4200// Scan environment for important settings which might effect the VM.
4201// Trace out settings. Warn about invalid settings and/or correct them.
4202//
4203// Must run after os::Aix::initialue_os_info().
4204void os::Aix::scan_environment() {
4205
4206  char* p;
4207  int rc;
4208
4209  // Warn explicity if EXTSHM=ON is used. That switch changes how
4210  // System V shared memory behaves. One effect is that page size of
4211  // shared memory cannot be change dynamically, effectivly preventing
4212  // large pages from working.
4213  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4214  // recommendation is (in OSS notes) to switch it off.
4215  p = ::getenv("EXTSHM");
4216  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4217  if (p && strcasecmp(p, "ON") == 0) {
4218    _extshm = 1;
4219    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4220    if (!AllowExtshm) {
4221      // We allow under certain conditions the user to continue. However, we want this
4222      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4223      // that the VM is not able to allocate 64k pages for the heap.
4224      // We do not want to run with reduced performance.
4225      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4226    }
4227  } else {
4228    _extshm = 0;
4229  }
4230
4231  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4232  // Not tested, not supported.
4233  //
4234  // Note that it might be worth the trouble to test and to require it, if only to
4235  // get useful return codes for mprotect.
4236  //
4237  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4238  // exec() ? before loading the libjvm ? ....)
4239  p = ::getenv("XPG_SUS_ENV");
4240  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4241  if (p && strcmp(p, "ON") == 0) {
4242    _xpg_sus_mode = 1;
4243    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4244    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4245    // clobber address ranges. If we ever want to support that, we have to do some
4246    // testing first.
4247    guarantee(false, "XPG_SUS_ENV=ON not supported");
4248  } else {
4249    _xpg_sus_mode = 0;
4250  }
4251
4252  if (os::Aix::on_pase()) {
4253    p = ::getenv("QIBM_MULTI_THREADED");
4254    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4255  }
4256
4257  p = ::getenv("LDR_CNTRL");
4258  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4259  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4260    if (p && ::strstr(p, "TEXTPSIZE")) {
4261      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4262        "you may experience hangs or crashes on OS/400 V7R1.");
4263    }
4264  }
4265
4266  p = ::getenv("AIXTHREAD_GUARDPAGES");
4267  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4268
4269} // end: os::Aix::scan_environment()
4270
4271// PASE: initialize the libo4 library (PASE porting library).
4272void os::Aix::initialize_libo4() {
4273  guarantee(os::Aix::on_pase(), "OS/400 only.");
4274  if (!libo4::init()) {
4275    trcVerbose("libo4 initialization failed.");
4276    assert(false, "libo4 initialization failed");
4277  } else {
4278    trcVerbose("libo4 initialized.");
4279  }
4280}
4281
4282// AIX: initialize the libperfstat library.
4283void os::Aix::initialize_libperfstat() {
4284  assert(os::Aix::on_aix(), "AIX only");
4285  if (!libperfstat::init()) {
4286    trcVerbose("libperfstat initialization failed.");
4287    assert(false, "libperfstat initialization failed");
4288  } else {
4289    trcVerbose("libperfstat initialized.");
4290  }
4291}
4292
4293/////////////////////////////////////////////////////////////////////////////
4294// thread stack
4295
4296// Get the current stack base from the OS (actually, the pthread library).
4297// Note: usually not page aligned.
4298address os::current_stack_base() {
4299  AixMisc::stackbounds_t bounds;
4300  bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4301  guarantee(rc, "Unable to retrieve stack bounds.");
4302  return bounds.base;
4303}
4304
4305// Get the current stack size from the OS (actually, the pthread library).
4306// Returned size is such that (base - size) is always aligned to page size.
4307size_t os::current_stack_size() {
4308  AixMisc::stackbounds_t bounds;
4309  bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4310  guarantee(rc, "Unable to retrieve stack bounds.");
4311  // Align the returned stack size such that the stack low address
4312  // is aligned to page size (Note: base is usually not and we do not care).
4313  // We need to do this because caller code will assume stack low address is
4314  // page aligned and will place guard pages without checking.
4315  address low = bounds.base - bounds.size;
4316  address low_aligned = (address)align_up(low, os::vm_page_size());
4317  size_t s = bounds.base - low_aligned;
4318  return s;
4319}
4320
4321extern char** environ;
4322
4323// Run the specified command in a separate process. Return its exit value,
4324// or -1 on failure (e.g. can't fork a new process).
4325// Unlike system(), this function can be called from signal handler. It
4326// doesn't block SIGINT et al.
4327int os::fork_and_exec(char* cmd) {
4328  char * argv[4] = {"sh", "-c", cmd, NULL};
4329
4330  pid_t pid = fork();
4331
4332  if (pid < 0) {
4333    // fork failed
4334    return -1;
4335
4336  } else if (pid == 0) {
4337    // child process
4338
4339    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4340    execve("/usr/bin/sh", argv, environ);
4341
4342    // execve failed
4343    _exit(-1);
4344
4345  } else {
4346    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4347    // care about the actual exit code, for now.
4348
4349    int status;
4350
4351    // Wait for the child process to exit. This returns immediately if
4352    // the child has already exited. */
4353    while (waitpid(pid, &status, 0) < 0) {
4354      switch (errno) {
4355        case ECHILD: return 0;
4356        case EINTR: break;
4357        default: return -1;
4358      }
4359    }
4360
4361    if (WIFEXITED(status)) {
4362      // The child exited normally; get its exit code.
4363      return WEXITSTATUS(status);
4364    } else if (WIFSIGNALED(status)) {
4365      // The child exited because of a signal.
4366      // The best value to return is 0x80 + signal number,
4367      // because that is what all Unix shells do, and because
4368      // it allows callers to distinguish between process exit and
4369      // process death by signal.
4370      return 0x80 + WTERMSIG(status);
4371    } else {
4372      // Unknown exit code; pass it through.
4373      return status;
4374    }
4375  }
4376  return -1;
4377}
4378
4379// is_headless_jre()
4380//
4381// Test for the existence of xawt/libmawt.so or libawt_xawt.so
4382// in order to report if we are running in a headless jre.
4383//
4384// Since JDK8 xawt/libmawt.so is moved into the same directory
4385// as libawt.so, and renamed libawt_xawt.so
4386bool os::is_headless_jre() {
4387  struct stat statbuf;
4388  char buf[MAXPATHLEN];
4389  char libmawtpath[MAXPATHLEN];
4390  const char *xawtstr = "/xawt/libmawt.so";
4391  const char *new_xawtstr = "/libawt_xawt.so";
4392
4393  char *p;
4394
4395  // Get path to libjvm.so
4396  os::jvm_path(buf, sizeof(buf));
4397
4398  // Get rid of libjvm.so
4399  p = strrchr(buf, '/');
4400  if (p == NULL) return false;
4401  else *p = '\0';
4402
4403  // Get rid of client or server
4404  p = strrchr(buf, '/');
4405  if (p == NULL) return false;
4406  else *p = '\0';
4407
4408  // check xawt/libmawt.so
4409  strcpy(libmawtpath, buf);
4410  strcat(libmawtpath, xawtstr);
4411  if (::stat(libmawtpath, &statbuf) == 0) return false;
4412
4413  // check libawt_xawt.so
4414  strcpy(libmawtpath, buf);
4415  strcat(libmawtpath, new_xawtstr);
4416  if (::stat(libmawtpath, &statbuf) == 0) return false;
4417
4418  return true;
4419}
4420
4421// Get the default path to the core file
4422// Returns the length of the string
4423int os::get_core_path(char* buffer, size_t bufferSize) {
4424  const char* p = get_current_directory(buffer, bufferSize);
4425
4426  if (p == NULL) {
4427    assert(p != NULL, "failed to get current directory");
4428    return 0;
4429  }
4430
4431  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4432                                               p, current_process_id());
4433
4434  return strlen(buffer);
4435}
4436
#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
  // Intentionally empty: there are no special-reserve tests on this platform.
}
#endif
4442
4443bool os::start_debugging(char *buf, int buflen) {
4444  int len = (int)strlen(buf);
4445  char *p = &buf[len];
4446
4447  jio_snprintf(p, buflen -len,
4448                 "\n\n"
4449                 "Do you want to debug the problem?\n\n"
4450                 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4451                 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4452                 "Otherwise, press RETURN to abort...",
4453                 os::current_process_id(),
4454                 os::current_thread_id(), thread_self());
4455
4456  bool yes = os::message_box("Unexpected Error", buf);
4457
4458  if (yes) {
4459    // yes, user asked VM to launch debugger
4460    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4461
4462    os::fork_and_exec(buf);
4463    yes = false;
4464  }
4465  return yes;
4466}
4467
4468static inline time_t get_mtime(const char* filename) {
4469  struct stat st;
4470  int ret = os::stat(filename, &st);
4471  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4472  return st.st_mtime;
4473}
4474
4475int os::compare_file_modified_times(const char* file1, const char* file2) {
4476  time_t t1 = get_mtime(file1);
4477  time_t t2 = get_mtime(file2);
4478  return t1 - t2;
4479}
4480