os_aix.cpp revision 13447:9a75c2f7bf06
1/*
2 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26// According to the AIX OS doc #pragma alloca must be used
27// with C++ compiler before referencing the function alloca()
28#pragma alloca
29
30// no precompiled headers
31#include "classfile/classLoader.hpp"
32#include "classfile/systemDictionary.hpp"
33#include "classfile/vmSymbols.hpp"
34#include "code/icBuffer.hpp"
35#include "code/vtableStubs.hpp"
36#include "compiler/compileBroker.hpp"
37#include "interpreter/interpreter.hpp"
38#include "jvm_aix.h"
39#include "logging/log.hpp"
40#include "libo4.hpp"
41#include "libperfstat_aix.hpp"
42#include "libodm_aix.hpp"
43#include "loadlib_aix.hpp"
44#include "memory/allocation.inline.hpp"
45#include "memory/filemap.hpp"
46#include "misc_aix.hpp"
47#include "oops/oop.inline.hpp"
48#include "os_aix.inline.hpp"
49#include "os_share_aix.hpp"
50#include "porting_aix.hpp"
51#include "prims/jniFastGetField.hpp"
52#include "prims/jvm.h"
53#include "prims/jvm_misc.hpp"
54#include "runtime/arguments.hpp"
55#include "runtime/atomic.hpp"
56#include "runtime/extendedPC.hpp"
57#include "runtime/globals.hpp"
58#include "runtime/interfaceSupport.hpp"
59#include "runtime/java.hpp"
60#include "runtime/javaCalls.hpp"
61#include "runtime/mutexLocker.hpp"
62#include "runtime/objectMonitor.hpp"
63#include "runtime/orderAccess.inline.hpp"
64#include "runtime/os.hpp"
65#include "runtime/osThread.hpp"
66#include "runtime/perfMemory.hpp"
67#include "runtime/sharedRuntime.hpp"
68#include "runtime/statSampler.hpp"
69#include "runtime/stubRoutines.hpp"
70#include "runtime/thread.inline.hpp"
71#include "runtime/threadCritical.hpp"
72#include "runtime/timer.hpp"
73#include "runtime/vm_version.hpp"
74#include "services/attachListener.hpp"
75#include "services/runtimeService.hpp"
76#include "utilities/align.hpp"
77#include "utilities/decoder.hpp"
78#include "utilities/defaultStream.hpp"
79#include "utilities/events.hpp"
80#include "utilities/growableArray.hpp"
81#include "utilities/vmError.hpp"
82
83// put OS-includes here (sorted alphabetically)
84#include <errno.h>
85#include <fcntl.h>
86#include <inttypes.h>
87#include <poll.h>
88#include <procinfo.h>
89#include <pthread.h>
90#include <pwd.h>
91#include <semaphore.h>
92#include <signal.h>
93#include <stdint.h>
94#include <stdio.h>
95#include <string.h>
96#include <unistd.h>
97#include <sys/ioctl.h>
98#include <sys/ipc.h>
99#include <sys/mman.h>
100#include <sys/resource.h>
101#include <sys/select.h>
102#include <sys/shm.h>
103#include <sys/socket.h>
104#include <sys/stat.h>
105#include <sys/sysinfo.h>
106#include <sys/systemcfg.h>
107#include <sys/time.h>
108#include <sys/times.h>
109#include <sys/types.h>
110#include <sys/utsname.h>
111#include <sys/vminfo.h>
112#include <sys/wait.h>
113
// Missing prototypes for various system APIs.
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

// Prior to AIX 6.1, these kernel query functions are not declared in the
// system headers, so declare them here ourselves.
#if !defined(_AIXVERSION_610)
extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
extern "C" int getargs   (procsinfo*, int, char*, int);
#endif

// Maximum path length used for buffers holding file system paths.
#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// Defined later in this file; forward-declared for use above their definitions.
static address resolve_function_descriptor_to_code_pointer(address p);

static void vmembk_print_on(outputStream* os);
137
////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

// Total physical memory in bytes; set in initialize_system_info().
julong    os::Aix::_physical_memory = 0;

pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" resp. 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found at IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
// Filled in by query_multipage_support(); (size_t)-1 means "uninitialized".
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
218
219// We must not accidentally allocate memory close to the BRK - even if
220// that would work - because then we prevent the BRK segment from
221// growing which may result in a malloc OOM even though there is
222// enough memory. The problem only arises if we shmat() or mmap() at
223// a specific wish address, e.g. to place the heap in a
224// compressed-oops-friendly way.
225static bool is_close_to_brk(address a) {
226  assert0(g_brk_at_startup != NULL);
227  if (a >= g_brk_at_startup &&
228      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
229    return true;
230  }
231  return false;
232}
233
// Returns an estimate of currently available physical memory in bytes
// (delegates to the platform-specific Aix::available_memory()).
julong os::available_memory() {
  return Aix::available_memory();
}
237
238julong os::Aix::available_memory() {
239  // Avoid expensive API call here, as returned value will always be null.
240  if (os::Aix::on_pase()) {
241    return 0x0LL;
242  }
243  os::Aix::meminfo_t mi;
244  if (os::Aix::get_meminfo(&mi)) {
245    return mi.real_free;
246  } else {
247    return ULONG_MAX;
248  }
249}
250
// Returns total physical memory in bytes (cached by Aix::initialize_system_info()).
julong os::physical_memory() {
  return Aix::physical_memory();
}
254
255// Return true if user is running as root.
256
257bool os::have_special_privileges() {
258  static bool init = false;
259  static bool privileges = false;
260  if (!init) {
261    privileges = (getuid() != geteuid()) || (getgid() != getegid());
262    init = true;
263  }
264  return privileges;
265}
266
267// Helper function, emulates disclaim64 using multiple 32bit disclaims
268// because we cannot use disclaim64() on AS/400 and old AIX releases.
269static bool my_disclaim64(char* addr, size_t size) {
270
271  if (size == 0) {
272    return true;
273  }
274
275  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
276  const unsigned int maxDisclaimSize = 0x40000000;
277
278  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
279  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
280
281  char* p = addr;
282
283  for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
284    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
285      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
286      return false;
287    }
288    p += maxDisclaimSize;
289  }
290
291  if (lastDisclaimSize > 0) {
292    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
293      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
294      return false;
295    }
296  }
297
298  return true;
299}
300
// Cpu architecture string
// (Only the PowerPC variants are supported by the AIX port; any other
// target is a build error.)
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
309
// Wrap the function "vmgetinfo" which is not available on older OS releases.
// Aborts (guarantee) when called on AS/400 older than V6R1, where vmgetinfo
// does not exist; otherwise forwards directly to ::vmgetinfo().
static int checked_vmgetinfo(void *out, int command, int arg) {
  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
  }
  return ::vmgetinfo(out, command, arg);
}
317
// Given an address, returns the size of the page backing that address.
// Falls back to 4K on very old OS/400 (no vmgetinfo available) and - with
// an assertion in debug builds - when the vmgetinfo query itself fails.
size_t os::Aix::query_pagesize(void* addr) {

  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
    return 4*K;
  }

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) != 0) {
    assert(false, "vmgetinfo failed to retrieve page size");
    return 4*K;
  }
  return pi.pagesize;
}
335
// Initializes processor count and total physical memory. Must run before
// anything queries os::physical_memory()/_processor_count.
void os::Aix::initialize_system_info() {

  // Get the number of online(logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
  }
  // Note: in release builds, on get_meminfo failure this stores whatever
  // real_total was zero-initialized to by get_meminfo (0).
  _physical_memory = (julong) mi.real_total;
}
349
350// Helper function for tracing page sizes.
351static const char* describe_pagesize(size_t pagesize) {
352  switch (pagesize) {
353    case 4*K : return "4K";
354    case 64*K: return "64K";
355    case 16*M: return "16M";
356    case 16*G: return "16G";
357    default:
358      assert(false, "surprise");
359      return "??";
360  }
361}
362
// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == 4*K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is defined either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  {
    void* p = ::malloc(16*M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  // Note that this is pure curiosity. We do not rely on default page size but set
  // our own page size after allocated.
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because primordial thread's stack may have different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size. Should be the same as data page size because
  // pthread stacks are allocated from C-Heap.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  {
    address any_function =
      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    trcVerbose("OS/400 < V6R1 - no large page support.");
    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
      trcVerbose("disabling multipage support.");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i ++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i ++) {
      const size_t pagesize = sizes[i];
      if (pagesize != 64*K && pagesize != 16*M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set pagesize.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        // Print the *saved* errno: the IPC_RMID call above may clobber errno.
        // (This also fixes the former "%n" conversion - undefined behavior
        //  with an int argument - which was a typo for "%d".)
        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%d", en);
      } else {
        // Attach and double check pagesize.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == 64*K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == 16*M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s",
      (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
      (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
  trcVerbose("Multipage error details: %d",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == 4*K);
  assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
  assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);

}
515
// Computes and publishes the system properties derived from the location of
// libjvm.so: dll_dir, java_home, the boot class path, the native library
// search path (user LIBPATH + invariant default), and the extensions dirs.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Strip one more component (/lib) to arrive at java_home - but only if
    // the previous strip succeeded (pslash still holds its result).
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  // (bufsize was sized above to leave room for EXTENSIONS_DIR.)
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
581
////////////////////////////////////////////////////////////////////////////////
// breakpoint support

// Triggers a programmatic breakpoint (expands the platform BREAKPOINT macro).
void os::breakpoint() {
  BREAKPOINT;
}

// Deliberately empty: exists only as a well-known symbol on which a
// debugger can set a breakpoint ("b breakpoint").
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
592
////////////////////////////////////////////////////////////////////////////////
// signal support

// Set to true by signal_sets_init() (debug builds only; guards the accessors below).
debug_only(static bool signal_sets_initialized = false);
// Filled in by signal_sets_init(); see unblocked_signals() / vm_signals().
static sigset_t unblocked_sigs, vm_sigs;
598
599bool os::Aix::is_sig_ignored(int sig) {
600  struct sigaction oact;
601  sigaction(sig, (struct sigaction*)NULL, &oact);
602  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
603    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
604  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
605    return true;
606  } else {
607    return false;
608  }
609}
610
// Builds the two signal sets used by hotspot_sigmask(): 'unblocked_sigs'
// (unblocked in every thread) and 'vm_sigs' (handled only by the VM thread).
// Must run once, before any thread calls hotspot_sigmask().
void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SR_signum);

  // Shutdown signals are only unblocked if the user has not ignored them
  // (an ignored disposition means the user wants them left alone).
  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}
652
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Valid only after signal_sets_init() has run.
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
659
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Valid only after signal_sets_init() has run.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
666
// Installs the HotSpot signal mask on the calling thread (which must be the
// thread represented by 'thread'): unblocks the common VM signals, and
// unblocks/blocks the VM-thread-only signals depending on the thread kind.
// The caller's previous mask is preserved in the OSThread for later restore.
void os::Aix::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  // SIG_BLOCK with a NULL set only queries the current mask.
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
688
// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
// On PASE the numbers come from the libo4 porting library; on AIX from the
// (dynamically loaded) libperfstat. All values in pmi are in bytes.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    // perfstat reports in units of 4K pages; convert to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
756
//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads.
// Records stack geometry and thread ids on the OSThread, installs the
// HotSpot signal mask and FPU state, runs the Thread's run() method, and
// clears thread-local storage before returning.
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    thread->set_stack_base(os::current_stack_base());
    thread->set_stack_size(os::current_stack_size());
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  // Shift the stack pointer by a pseudo-random multiple of 128 bytes
  // (requires #pragma alloca at the top of this file for xlC).
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}
830
831bool os::create_thread(Thread* thread, ThreadType thr_type,
832                       size_t req_stack_size) {
833
834  assert(thread->osthread() == NULL, "caller responsible");
835
836  // Allocate the OSThread object.
837  OSThread* osthread = new OSThread(NULL, NULL);
838  if (osthread == NULL) {
839    return false;
840  }
841
842  // Set the correct thread state.
843  osthread->set_thread_type(thr_type);
844
845  // Initial state is ALLOCATED but not INITIALIZED
846  osthread->set_state(ALLOCATED);
847
848  thread->set_osthread(osthread);
849
850  // Init thread attributes.
851  pthread_attr_t attr;
852  pthread_attr_init(&attr);
853  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
854
855  // Make sure we run in 1:1 kernel-user-thread mode.
856  if (os::Aix::on_aix()) {
857    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
858    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
859  }
860
861  // Start in suspended state, and in os::thread_start, wake the thread up.
862  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
863
864  // Calculate stack size if it's not specified by caller.
865  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);
866
867  // On Aix, pthread_attr_setstacksize fails with huge values and leaves the
868  // thread size in attr unchanged. If this is the minimal stack size as set
869  // by pthread_attr_init this leads to crashes after thread creation. E.g. the
870  // guard pages might not fit on the tiny stack created.
871  int ret = pthread_attr_setstacksize(&attr, stack_size);
872  if (ret != 0) {
873    log_warning(os, thread)("The thread stack size specified is invalid: " SIZE_FORMAT "k",
874                            stack_size / K);
875  }
876
877  // Configure libc guard page.
878  ret = pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));
879
880  pthread_t tid = 0;
881  if (ret == 0) {
882    ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
883  }
884
885  if (ret == 0) {
886    char buf[64];
887    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
888      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
889  } else {
890    char buf[64];
891    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
892      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
893  }
894
895  pthread_attr_destroy(&attr);
896
897  if (ret != 0) {
898    // Need to clean up stuff we've allocated so far.
899    thread->set_osthread(NULL);
900    delete osthread;
901    return false;
902  }
903
904  // OSThread::thread_id is the pthread id.
905  osthread->set_thread_id(tid);
906
907  return true;
908}
909
910/////////////////////////////////////////////////////////////////////////////
911// attach existing thread
912
// bootstrap the main thread
// Attaches the primordial (already running) thread as a JavaThread; there is
// no separate native thread creation for the main thread.
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
918
919bool os::create_attached_thread(JavaThread* thread) {
920#ifdef ASSERT
921    thread->verify_not_published();
922#endif
923
924  // Allocate the OSThread object
925  OSThread* osthread = new OSThread(NULL, NULL);
926
927  if (osthread == NULL) {
928    return false;
929  }
930
931  const pthread_t pthread_id = ::pthread_self();
932  const tid_t kernel_thread_id = ::thread_self();
933
934  // OSThread::thread_id is the pthread id.
935  osthread->set_thread_id(pthread_id);
936
937  // .. but keep kernel thread id too for diagnostics
938  osthread->set_kernel_thread_id(kernel_thread_id);
939
940  // initialize floating point control register
941  os::Aix::init_thread_fpu_state();
942
943  // Initial thread state is RUNNABLE
944  osthread->set_state(RUNNABLE);
945
946  thread->set_osthread(osthread);
947
948  if (UseNUMA) {
949    int lgrp_id = os::numa_get_group_id();
950    if (lgrp_id != -1) {
951      thread->set_lgrp_id(lgrp_id);
952    }
953  }
954
955  // initialize signal mask for this thread
956  // and save the caller's signal mask
957  os::Aix::hotspot_sigmask(thread);
958
959  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
960    os::current_thread_id(), (uintx) kernel_thread_id);
961
962  return true;
963}
964
// Resume a thread that was created suspended in os::create_thread
// (see PTHREAD_CREATE_SUSPENDED_NP there).
void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}
969
970// Free OS resources related to the OSThread
971void os::free_thread(OSThread* osthread) {
972  assert(osthread != NULL, "osthread not set");
973
974  // We are told to free resources of the argument thread,
975  // but we can only really operate on the current thread.
976  assert(Thread::current()->osthread() == osthread,
977         "os::free_thread but not current thread");
978
979  // Restore caller's signal mask
980  sigset_t sigmask = osthread->caller_sigmask();
981  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
982
983  delete osthread;
984}
985
986////////////////////////////////////////////////////////////////////////////////
987// time support
988
// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  // elapsed_counter() ticks in microseconds; scale to seconds.
  return (double)(os::elapsed_counter()) * 0.000001;
}
994
995jlong os::elapsed_counter() {
996  timeval time;
997  int status = gettimeofday(&time, NULL);
998  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
999}
1000
jlong os::elapsed_frequency() {
  return (1000 * 1000); // elapsed_counter() ticks in microseconds
}

// Thread-local cpu time ("vtime") is available via getrusage(RUSAGE_THREAD)
// in os::elapsedVTime(); it is always on, so enable/enabled are no-ops.
bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
1008
1009double os::elapsedVTime() {
1010  struct rusage usage;
1011  int retval = getrusage(RUSAGE_THREAD, &usage);
1012  if (retval == 0) {
1013    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1014  } else {
1015    // better than nothing, but not much
1016    return elapsedTime();
1017  }
1018}
1019
1020jlong os::javaTimeMillis() {
1021  timeval time;
1022  int status = gettimeofday(&time, NULL);
1023  assert(status != -1, "aix error at gettimeofday()");
1024  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1025}
1026
1027void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1028  timeval time;
1029  int status = gettimeofday(&time, NULL);
1030  assert(status != -1, "aix error at gettimeofday()");
1031  seconds = jlong(time.tv_sec);
1032  nanos = jlong(time.tv_usec) * 1000;
1033}
1034
// Nanosecond timer for System.nanoTime(). Monotonic on AIX (time base
// register via mread_real_time); on PASE it falls back to gettimeofday,
// giving only microsecond resolution.
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the
    // following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1063
// Describe the properties of the os::javaTimeNanos() timer for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()'),
  // so the timer neither skips backward nor forward.
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}
1071
1072// Return the real, user, and system times in seconds from an
1073// arbitrary fixed point in the past.
1074bool os::getTimesSecs(double* process_real_time,
1075                      double* process_user_time,
1076                      double* process_system_time) {
1077  struct tms ticks;
1078  clock_t real_ticks = times(&ticks);
1079
1080  if (real_ticks == (clock_t) (-1)) {
1081    return false;
1082  } else {
1083    double ticks_per_second = (double) clock_tics_per_sec;
1084    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1085    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1086    *process_real_time = ((double) real_ticks) / ticks_per_second;
1087
1088    return true;
1089  }
1090}
1091
1092char * os::local_time_string(char *buf, size_t buflen) {
1093  struct tm t;
1094  time_t long_time;
1095  time(&long_time);
1096  localtime_r(&long_time, &t);
1097  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1098               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1099               t.tm_hour, t.tm_min, t.tm_sec);
1100  return buf;
1101}
1102
// Thread-safe localtime for platform-independent callers
// (delegates to POSIX localtime_r).
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
1106
1107////////////////////////////////////////////////////////////////////////////////
1108// runtime exit support
1109
1110// Note: os::shutdown() might be called very early during initialization, or
1111// called from signal handler. Before adding something to os::shutdown(), make
1112// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove the attach listener's object in the file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook and run it last, after all other cleanup.
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1130
1131// Note: os::abort() might be called very early during initialization, or
1132// called from signal handler. Before adding something to os::abort(), make
1133// sure it is async-safe and can handle partially initialized VM.
// Terminate the VM after running os::shutdown(). Dumps core via ::abort()
// when requested, otherwise exits with status 1.
// Note: 'siginfo' and 'context' are unused on AIX.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    // In debug builds, announce which thread triggers the core dump.
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}
1150
// Die immediately, no exit hook, no abort hook, no cleanup.
// Unlike os::abort(), this skips os::shutdown() entirely.
void os::die() {
  ::abort();
}
1155
1156// This method is a copy of JDK's sysGetLastErrorString
1157// from src/solaris/hpi/src/system_md.c
1158
1159size_t os::lasterror(char *buf, size_t len) {
1160  if (errno == 0) return 0;
1161
1162  const char *s = os::strerror(errno);
1163  size_t n = ::strlen(s);
1164  if (n >= len) {
1165    n = len - 1;
1166  }
1167  ::strncpy(buf, s, n);
1168  buf[n] = '\0';
1169  return n;
1170}
1171
// The platform thread id is the pthread id (see also the kernel thread id
// kept in OSThread for diagnostics).
intx os::current_thread_id() {
  return (intx)pthread_self();
}

// The OS process id.
int os::current_process_id() {
  return getpid();
}
1179
// DLL functions

// Shared libraries on AIX use the ".so" suffix.
const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1187
1188static bool file_exists(const char* filename) {
1189  struct stat statbuf;
1190  if (filename == NULL || strlen(filename) == 0) {
1191    return false;
1192  }
1193  return os::stat(filename, &statbuf) == 0;
1194}
1195
// Build a platform library file name ("lib<fname>.so") into 'buffer'.
// 'pname' may be empty (bare name), a single directory, or a
// path-separator-separated list of directories; for a list, the first
// entry yielding an existing file wins. Returns false on buffer overflow
// or when no entry of a path list matched.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    // No directory given - just the library file name.
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // 'pname' is a search path list; probe each entry in order.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage allocated by split_path (each element, then the array)
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    // Single directory - no existence check, mirror the bare-name case.
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1243
1244// Check if addr is inside libjvm.so.
1245bool os::address_is_in_vm(address addr) {
1246
1247  // Input could be a real pc or a function pointer literal. The latter
1248  // would be a function descriptor residing in the data segment of a module.
1249  loaded_module_t lm;
1250  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1251    return lm.is_in_vm;
1252  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1253    return lm.is_in_vm;
1254  } else {
1255    return false;
1256  }
1257
1258}
1259
1260// Resolve an AIX function descriptor literal to a code pointer.
1261// If the input is a valid code pointer to a text segment of a loaded module,
1262//   it is returned unchanged.
1263// If the input is a valid AIX function descriptor, it is resolved to the
1264//   code entry point.
1265// If the input is neither a valid function descriptor nor a valid code pointer,
1266//   NULL is returned.
1267static address resolve_function_descriptor_to_code_pointer(address p) {
1268
1269  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1270    // It is a real code pointer.
1271    return p;
1272  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1273    // Pointer to data segment, potential function descriptor.
1274    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1275    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1276      // It is a function descriptor.
1277      return code_entry;
1278    }
1279  }
1280
1281  return NULL;
1282}
1283
1284bool os::dll_address_to_function_name(address addr, char *buf,
1285                                      int buflen, int *offset,
1286                                      bool demangle) {
1287  if (offset) {
1288    *offset = -1;
1289  }
1290  // Buf is not optional, but offset is optional.
1291  assert(buf != NULL, "sanity check");
1292  buf[0] = '\0';
1293
1294  // Resolve function ptr literals first.
1295  addr = resolve_function_descriptor_to_code_pointer(addr);
1296  if (!addr) {
1297    return false;
1298  }
1299
1300  return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1301}
1302
1303bool os::dll_address_to_library_name(address addr, char* buf,
1304                                     int buflen, int* offset) {
1305  if (offset) {
1306    *offset = -1;
1307  }
1308  // Buf is not optional, but offset is optional.
1309  assert(buf != NULL, "sanity check");
1310  buf[0] = '\0';
1311
1312  // Resolve function ptr literals first.
1313  addr = resolve_function_descriptor_to_code_pointer(addr);
1314  if (!addr) {
1315    return false;
1316  }
1317
1318  return AixSymbols::get_module_name(addr, buf, buflen);
1319}
1320
1321// Loads .dll/.so and in case of error it checks if .dll/.so was built
1322// for the same architecture as Hotspot is running on.
1323void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1324
1325  if (ebuf && ebuflen > 0) {
1326    ebuf[0] = '\0';
1327    ebuf[ebuflen - 1] = '\0';
1328  }
1329
1330  if (!filename || strlen(filename) == 0) {
1331    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1332    return NULL;
1333  }
1334
1335  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1336  void * result= ::dlopen(filename, RTLD_LAZY);
1337  if (result != NULL) {
1338    // Reload dll cache. Don't do this in signal handling.
1339    LoadedLibraries::reload();
1340    return result;
1341  } else {
1342    // error analysis when dlopen fails
1343    const char* const error_report = ::dlerror();
1344    if (error_report && ebuf && ebuflen > 0) {
1345      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1346               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1347    }
1348  }
1349  return NULL;
1350}
1351
1352void* os::dll_lookup(void* handle, const char* name) {
1353  void* res = dlsym(handle, name);
1354  return res;
1355}
1356
// Handle for symbol lookup in the main program itself (dlopen(NULL)).
void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}
1360
// Print the list of loaded modules as tracked by LoadedLibraries.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1365
// One-line OS summary ("<release> <version>") for the hs_err file header.
void os::get_summary_os_info(char* buf, size_t buflen) {
  // There might be something more readable than uname results for AIX.
  struct utsname name;
  uname(&name);
  snprintf(buf, buflen, "%s %s", name.release, name.version);
}
1372
1373void os::print_os_info(outputStream* st) {
1374  st->print("OS:");
1375
1376  st->print("uname:");
1377  struct utsname name;
1378  uname(&name);
1379  st->print(name.sysname); st->print(" ");
1380  st->print(name.nodename); st->print(" ");
1381  st->print(name.release); st->print(" ");
1382  st->print(name.version); st->print(" ");
1383  st->print(name.machine);
1384  st->cr();
1385
1386  uint32_t ver = os::Aix::os_version();
1387  st->print_cr("AIX kernel version %u.%u.%u.%u",
1388               (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1389
1390  os::Posix::print_rlimit_info(st);
1391
1392  // load average
1393  st->print("load average:");
1394  double loadavg[3] = {-1.L, -1.L, -1.L};
1395  os::loadavg(loadavg, 3);
1396  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1397  st->cr();
1398
1399  // print wpar info
1400  libperfstat::wparinfo_t wi;
1401  if (libperfstat::get_wparinfo(&wi)) {
1402    st->print_cr("wpar info");
1403    st->print_cr("name: %s", wi.name);
1404    st->print_cr("id:   %d", wi.wpar_id);
1405    st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1406  }
1407
1408  // print partition info
1409  libperfstat::partitioninfo_t pi;
1410  if (libperfstat::get_partitioninfo(&pi)) {
1411    st->print_cr("partition info");
1412    st->print_cr(" name: %s", pi.name);
1413  }
1414
1415}
1416
1417void os::print_memory_info(outputStream* st) {
1418
1419  st->print_cr("Memory:");
1420
1421  st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1422    describe_pagesize(g_multipage_support.pagesize));
1423  st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1424    describe_pagesize(g_multipage_support.datapsize));
1425  st->print_cr("  Text page size:                         %s",
1426    describe_pagesize(g_multipage_support.textpsize));
1427  st->print_cr("  Thread stack page size (pthread):       %s",
1428    describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1429  st->print_cr("  Default shared memory page size:        %s",
1430    describe_pagesize(g_multipage_support.shmpsize));
1431  st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1432    (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1433  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1434    (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1435  st->print_cr("  Multipage error: %d",
1436    g_multipage_support.error);
1437  st->cr();
1438  st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1439
1440  // print out LDR_CNTRL because it affects the default page sizes
1441  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1442  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1443
1444  // Print out EXTSHM because it is an unsupported setting.
1445  const char* const extshm = ::getenv("EXTSHM");
1446  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1447  if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1448    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1449  }
1450
1451  // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1452  const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1453  st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1454      aixthread_guardpages ? aixthread_guardpages : "<unset>");
1455
1456  os::Aix::meminfo_t mi;
1457  if (os::Aix::get_meminfo(&mi)) {
1458    char buffer[256];
1459    if (os::Aix::on_aix()) {
1460      st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1461      st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1462      st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1463      st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1464    } else {
1465      // PASE - Numbers are result of QWCRSSTS; they mean:
1466      // real_total: Sum of all system pools
1467      // real_free: always 0
1468      // pgsp_total: we take the size of the system ASP
1469      // pgsp_free: size of system ASP times percentage of system ASP unused
1470      st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1471      st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1472      st->print_cr("%% system asp used : " SIZE_FORMAT,
1473        mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1474    }
1475    st->print_raw(buffer);
1476  }
1477  st->cr();
1478
1479  // Print segments allocated with os::reserve_memory.
1480  st->print_cr("internal virtual memory regions used by vm:");
1481  vmembk_print_on(st);
1482}
1483
1484// Get a string for the cpuinfo that is a summary of the cpu type
1485void os::get_summary_cpu_info(char* buf, size_t buflen) {
1486  // This looks good
1487  libperfstat::cpuinfo_t ci;
1488  if (libperfstat::get_cpuinfo(&ci)) {
1489    strncpy(buf, ci.version, buflen);
1490  } else {
1491    strncpy(buf, "AIX", buflen);
1492  }
1493}
1494
// Print a one-line CPU summary for the hs_err file.
// Note: 'buf'/'buflen' are unused here; the output is printed directly.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash.
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::features());
  st->cr();
}
1503
1504static void print_signal_handler(outputStream* st, int sig,
1505                                 char* buf, size_t buflen);
1506
1507void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1508  st->print_cr("Signal Handlers:");
1509  print_signal_handler(st, SIGSEGV, buf, buflen);
1510  print_signal_handler(st, SIGBUS , buf, buflen);
1511  print_signal_handler(st, SIGFPE , buf, buflen);
1512  print_signal_handler(st, SIGPIPE, buf, buflen);
1513  print_signal_handler(st, SIGXFSZ, buf, buflen);
1514  print_signal_handler(st, SIGILL , buf, buflen);
1515  print_signal_handler(st, SR_signum, buf, buflen);
1516  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1517  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1518  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1519  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1520  print_signal_handler(st, SIGTRAP, buf, buflen);
1521  // We also want to know if someone else adds a SIGDANGER handler because
1522  // that will interfere with OOM killling.
1523  print_signal_handler(st, SIGDANGER, buf, buflen);
1524}
1525
// Cache for the (expensive) path resolution done by os::jvm_path().
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
// 'buf' must be at least MAXPATHLEN bytes. The result is cached in
// saved_jvm_path after the first successful call.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate the shared object containing this very function.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk backwards over the last four '/' separators.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  // Remember the result for subsequent calls.
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1609
// JNI symbol decoration: AIX uses plain C symbol names.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1617
1618////////////////////////////////////////////////////////////////////////////////
1619// sun.misc.Signal support
1620
// Counts SIGINTs received, used to coalesce bursts of Ctrl-C (see below).
static volatile jint sigint_count = 0;

// Handler installed for signals forwarded to sun.misc.Signal
// (registered via os::signal() / os::user_handler()).
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && VMError::is_error_reported()) {
    os::die();
  }

  // Hand the signal over to the signal dispatcher thread.
  os::signal_notify(sig);
}
1639
// Address of UserHandler, for registration through os::signal().
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}

// Function pointer types matching the two sigaction handler flavors.
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
1648
// Install 'handler' for 'signal_number'. Returns the previously installed
// handler, or (void*)-1 if sigaction() failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1680
// Send 'signal_number' to the current process.
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}

//
// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.
//

// Will be modified when max signal is changed to be dynamic
int os::sigexitnum_pd() {
  // NSIG is used as the pseudo-signal that tells the dispatcher to exit.
  return NSIG;
}
1694
// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;               // used on AIX
static msemaphore* p_sig_msem = 0;  // used on PASE, lives in shared memory
1706
// Initialize the signal semaphore: a POSIX semaphore on AIX, a memory
// semaphore (which must live in shared memory) on PASE. See comment above.
static void local_sem_init() {
  if (os::Aix::on_aix()) {
    int rc = ::sem_init(&sig_sem, 0, 0);
    guarantee(rc != -1, "sem_init failed");
  } else {
    // Memory semaphores must live in shared mem.
    guarantee0(p_sig_msem == NULL);
    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
  }
}

// Release the signal semaphore (called from the signal path, see
// os::signal_notify). Failures are traced once, not treated as fatal.
static void local_sem_post() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_post(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL);
    int rc = ::msem_unlock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}

// Acquire the signal semaphore, blocking until a signal is posted.
// Failures are traced once, not treated as fatal.
static void local_sem_wait() {
  static bool warn_only_once = false;
  if (os::Aix::on_aix()) {
    int rc = ::sem_wait(&sig_sem);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  } else {
    guarantee0(p_sig_msem != NULL); // must init before use
    int rc = ::msem_lock(p_sig_msem, 0);
    if (rc == -1 && !warn_only_once) {
      trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
      warn_only_once = true;
    }
  }
}
1755
// Platform-dependent part of signal subsystem initialization: clear the
// pending-signal counters and create the signal semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}
1763
// Record that signal 'sig' arrived and wake up the dispatcher thread
// blocked in check_pending_signals(). Counter bump must precede the post.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1768
// Scan the pending_signals counters for a signal whose count is positive,
// atomically decrement it and return the signal number. If 'wait' is false,
// return -1 when nothing is pending; otherwise block on the signal semaphore
// (posted by os::signal_notify) until a signal arrives.
static int check_pending_signals(bool wait) {
  // sigint_count is declared elsewhere (shared signal code).
  Atomic::store(0, &sigint_count);
  for (;;) {
    // Claim one pending signal via CAS. Note: in this HotSpot version
    // Atomic::cmpxchg takes (exchange_value, dest, compare_value) and
    // returns the old value, so equality with n means we won the race.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post so the wakeup is not lost, then self-suspend.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1808
// Poll for a pending signal without blocking; returns signal number or -1.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending; returns its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1816
1817////////////////////////////////////////////////////////////////////////////////
1818// Virtual Memory
1819
1820// We need to keep small simple bookkeeping for os::reserve_memory and friends.
1821
// Tag values for vmembk_t::type - how the range was obtained.
#define VMEM_MAPPED  1
#define VMEM_SHMATED 2
1824
// Bookkeeping record for one reserved memory range (see vmembk_add/find/remove).
struct vmembk_t {
  int type;         // VMEM_MAPPED (1 - mmap) or VMEM_SHMATED (2 - shmat)
  char* addr;       // start of the range
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;   // next entry in the global vmem list

  // Does this range contain address p?
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // Is [p, p+s) fully contained in this range?
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  // Print a one-line human-readable description of this range.
  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  // Aborts (guarantee0(false)) if either check fails.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1864
// Global singly-linked list of vmembk_t bookkeeping entries, guarded by a
// critical section.
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;
1869
1870static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1871  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1872  assert0(p);
1873  if (p) {
1874    MiscUtils::AutoCritSect lck(&vmem.cs);
1875    p->addr = addr; p->size = size;
1876    p->pagesize = pagesize;
1877    p->type = type;
1878    p->next = vmem.first;
1879    vmem.first = p;
1880  }
1881}
1882
1883static vmembk_t* vmembk_find(char* addr) {
1884  MiscUtils::AutoCritSect lck(&vmem.cs);
1885  for (vmembk_t* p = vmem.first; p; p = p->next) {
1886    if (p->addr <= addr && (p->addr + p->size) > addr) {
1887      return p;
1888    }
1889  }
1890  return NULL;
1891}
1892
1893static void vmembk_remove(vmembk_t* p0) {
1894  MiscUtils::AutoCritSect lck(&vmem.cs);
1895  assert0(p0);
1896  assert0(vmem.first); // List should not be empty.
1897  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1898    if (*pp == p0) {
1899      *pp = p0->next;
1900      ::free(p0);
1901      return;
1902    }
1903  }
1904  assert0(false); // Not found?
1905}
1906
1907static void vmembk_print_on(outputStream* os) {
1908  MiscUtils::AutoCritSect lck(&vmem.cs);
1909  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1910    vmi->print_on(os);
1911    os->cr();
1912  }
1913}
1914
1915// Reserve and attach a section of System V memory.
1916// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1917// address. Failing that, it will attach the memory anywhere.
1918// If <requested_addr> is NULL, function will attach the memory anywhere.
1919//
1920// <alignment_hint> is being ignored by this function. It is very probable however that the
1921// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1922// Should this be not enogh, we can put more work into it.
1923static char* reserve_shmated_memory (
1924  size_t bytes,
1925  char* requested_addr,
1926  size_t alignment_hint) {
1927
1928  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1929    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1930    bytes, requested_addr, alignment_hint);
1931
1932  // Either give me wish address or wish alignment but not both.
1933  assert0(!(requested_addr != NULL && alignment_hint != 0));
1934
1935  // We must prevent anyone from attaching too close to the
1936  // BRK because that may cause malloc OOM.
1937  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1938    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1939      "Will attach anywhere.", requested_addr);
1940    // Act like the OS refused to attach there.
1941    requested_addr = NULL;
1942  }
1943
1944  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1945  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1946  if (os::Aix::on_pase_V5R4_or_older()) {
1947    ShouldNotReachHere();
1948  }
1949
1950  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1951  const size_t size = align_up(bytes, 64*K);
1952
1953  // Reserve the shared segment.
1954  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1955  if (shmid == -1) {
1956    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1957    return NULL;
1958  }
1959
1960  // Important note:
1961  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1962  // We must right after attaching it remove it from the system. System V shm segments are global and
1963  // survive the process.
1964  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1965
1966  struct shmid_ds shmbuf;
1967  memset(&shmbuf, 0, sizeof(shmbuf));
1968  shmbuf.shm_pagesize = 64*K;
1969  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1970    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1971               size / (64*K), errno);
1972    // I want to know if this ever happens.
1973    assert(false, "failed to set page size for shmat");
1974  }
1975
1976  // Now attach the shared segment.
1977  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1978  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1979  // were not a segment boundary.
1980  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1981  const int errno_shmat = errno;
1982
1983  // (A) Right after shmat and before handing shmat errors delete the shm segment.
1984  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1985    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1986    assert(false, "failed to remove shared memory segment!");
1987  }
1988
1989  // Handle shmat error. If we failed to attach, just return.
1990  if (addr == (char*)-1) {
1991    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1992    return NULL;
1993  }
1994
1995  // Just for info: query the real page size. In case setting the page size did not
1996  // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
1997  const size_t real_pagesize = os::Aix::query_pagesize(addr);
1998  if (real_pagesize != shmbuf.shm_pagesize) {
1999    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2000  }
2001
2002  if (addr) {
2003    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2004      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2005  } else {
2006    if (requested_addr != NULL) {
2007      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2008    } else {
2009      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2010    }
2011  }
2012
2013  // book-keeping
2014  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2015  assert0(is_aligned_to(addr, os::vm_page_size()));
2016
2017  return addr;
2018}
2019
2020static bool release_shmated_memory(char* addr, size_t size) {
2021
2022  trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2023    addr, addr + size - 1);
2024
2025  bool rc = false;
2026
2027  // TODO: is there a way to verify shm size without doing bookkeeping?
2028  if (::shmdt(addr) != 0) {
2029    trcVerbose("error (%d).", errno);
2030  } else {
2031    trcVerbose("ok.");
2032    rc = true;
2033  }
2034  return rc;
2035}
2036
2037static bool uncommit_shmated_memory(char* addr, size_t size) {
2038  trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2039    addr, addr + size - 1);
2040
2041  const bool rc = my_disclaim64(addr, size);
2042
2043  if (!rc) {
2044    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2045    return false;
2046  }
2047  return true;
2048}
2049
2050////////////////////////////////  mmap-based routines /////////////////////////////////
2051
2052// Reserve memory via mmap.
2053// If <requested_addr> is given, an attempt is made to attach at the given address.
2054// Failing that, memory is allocated at any address.
2055// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2056// allocate at an address aligned with the given alignment. Failing that, memory
2057// is aligned anywhere.
2058static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2059  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2060    "alignment_hint " UINTX_FORMAT "...",
2061    bytes, requested_addr, alignment_hint);
2062
2063  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2064  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2065    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2066    return NULL;
2067  }
2068
2069  // We must prevent anyone from attaching too close to the
2070  // BRK because that may cause malloc OOM.
2071  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2072    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2073      "Will attach anywhere.", requested_addr);
2074    // Act like the OS refused to attach there.
2075    requested_addr = NULL;
2076  }
2077
2078  // Specify one or the other but not both.
2079  assert0(!(requested_addr != NULL && alignment_hint > 0));
2080
2081  // In 64K mode, we claim the global page size (os::vm_page_size())
2082  // is 64K. This is one of the few points where that illusion may
2083  // break, because mmap() will always return memory aligned to 4K. So
2084  // we must ensure we only ever return memory aligned to 64k.
2085  if (alignment_hint) {
2086    alignment_hint = lcm(alignment_hint, os::vm_page_size());
2087  } else {
2088    alignment_hint = os::vm_page_size();
2089  }
2090
2091  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2092  const size_t size = align_up(bytes, os::vm_page_size());
2093
2094  // alignment: Allocate memory large enough to include an aligned range of the right size and
2095  // cut off the leading and trailing waste pages.
2096  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2097  const size_t extra_size = size + alignment_hint;
2098
2099  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2100  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2101  int flags = MAP_ANONYMOUS | MAP_SHARED;
2102
2103  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2104  // it means if wishaddress is given but MAP_FIXED is not set.
2105  //
2106  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2107  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2108  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2109  // get clobbered.
2110  if (requested_addr != NULL) {
2111    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2112      flags |= MAP_FIXED;
2113    }
2114  }
2115
2116  char* addr = (char*)::mmap(requested_addr, extra_size,
2117      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2118
2119  if (addr == MAP_FAILED) {
2120    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2121    return NULL;
2122  }
2123
2124  // Handle alignment.
2125  char* const addr_aligned = align_up(addr, alignment_hint);
2126  const size_t waste_pre = addr_aligned - addr;
2127  char* const addr_aligned_end = addr_aligned + size;
2128  const size_t waste_post = extra_size - waste_pre - size;
2129  if (waste_pre > 0) {
2130    ::munmap(addr, waste_pre);
2131  }
2132  if (waste_post > 0) {
2133    ::munmap(addr_aligned_end, waste_post);
2134  }
2135  addr = addr_aligned;
2136
2137  if (addr) {
2138    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2139      addr, addr + bytes, bytes);
2140  } else {
2141    if (requested_addr != NULL) {
2142      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2143    } else {
2144      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2145    }
2146  }
2147
2148  // bookkeeping
2149  vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2150
2151  // Test alignment, see above.
2152  assert0(is_aligned_to(addr, os::vm_page_size()));
2153
2154  return addr;
2155}
2156
2157static bool release_mmaped_memory(char* addr, size_t size) {
2158  assert0(is_aligned_to(addr, os::vm_page_size()));
2159  assert0(is_aligned_to(size, os::vm_page_size()));
2160
2161  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2162    addr, addr + size - 1);
2163  bool rc = false;
2164
2165  if (::munmap(addr, size) != 0) {
2166    trcVerbose("failed (%d)\n", errno);
2167    rc = false;
2168  } else {
2169    trcVerbose("ok.");
2170    rc = true;
2171  }
2172
2173  return rc;
2174}
2175
2176static bool uncommit_mmaped_memory(char* addr, size_t size) {
2177
2178  assert0(is_aligned_to(addr, os::vm_page_size()));
2179  assert0(is_aligned_to(size, os::vm_page_size()));
2180
2181  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2182    addr, addr + size - 1);
2183  bool rc = false;
2184
2185  // Uncommit mmap memory with msync MS_INVALIDATE.
2186  if (::msync(addr, size, MS_INVALIDATE) != 0) {
2187    trcVerbose("failed (%d)\n", errno);
2188    rc = false;
2189  } else {
2190    trcVerbose("ok.");
2191    rc = true;
2192  }
2193
2194  return rc;
2195}
2196
2197int os::vm_page_size() {
2198  // Seems redundant as all get out.
2199  assert(os::Aix::page_size() != -1, "must call os::init");
2200  return os::Aix::page_size();
2201}
2202
2203// Aix allocates memory by pages.
2204int os::vm_allocation_granularity() {
2205  assert(os::Aix::page_size() != -1, "must call os::init");
2206  return os::Aix::page_size();
2207}
2208
#ifdef PRODUCT
// Print extra diagnostics when commit_memory() fails. Only compiled into
// PRODUCT builds; callers invoke it through PRODUCT_ONLY() right before
// vm_exit_out_of_memory() (see pd_commit_memory_or_exit below).
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          os::errno_name(err), err);
}
#endif
2217
2218void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2219                                  const char* mesg) {
2220  assert(mesg != NULL, "mesg must be specified");
2221  if (!pd_commit_memory(addr, size, exec)) {
2222    // Add extra info in product mode for vm_exit_out_of_memory():
2223    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2224    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2225  }
2226}
2227
2228bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2229
2230  assert(is_aligned_to(addr, os::vm_page_size()),
2231    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2232    p2i(addr), os::vm_page_size());
2233  assert(is_aligned_to(size, os::vm_page_size()),
2234    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2235    size, os::vm_page_size());
2236
2237  vmembk_t* const vmi = vmembk_find(addr);
2238  guarantee0(vmi);
2239  vmi->assert_is_valid_subrange(addr, size);
2240
2241  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2242
2243  if (UseExplicitCommit) {
2244    // AIX commits memory on touch. So, touch all pages to be committed.
2245    for (char* p = addr; p < (addr + size); p += 4*K) {
2246      *p = '\0';
2247    }
2248  }
2249
2250  return true;
2251}
2252
// Alignment-hint variant: the hint carries no meaning on AIX, so this simply
// delegates to the plain three-argument form.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}

// Alignment-hint variant of commit-or-exit; delegates likewise.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2263
2264bool os::pd_uncommit_memory(char* addr, size_t size) {
2265  assert(is_aligned_to(addr, os::vm_page_size()),
2266    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2267    p2i(addr), os::vm_page_size());
2268  assert(is_aligned_to(size, os::vm_page_size()),
2269    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2270    size, os::vm_page_size());
2271
2272  // Dynamically do different things for mmap/shmat.
2273  const vmembk_t* const vmi = vmembk_find(addr);
2274  guarantee0(vmi);
2275  vmi->assert_is_valid_subrange(addr, size);
2276
2277  if (vmi->type == VMEM_SHMATED) {
2278    return uncommit_shmated_memory(addr, size);
2279  } else {
2280    return uncommit_mmaped_memory(addr, size);
2281  }
2282}
2283
// Stack guard pages are handled differently on AIX; these entry points must
// never be reached.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}

// No-op on AIX: large-page realignment of a range is not performed here.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

// No-op on AIX: freeing parts of a reservation back to the OS is not done here.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2301
// NUMA support is not implemented on AIX; the following are stubs that
// model a single-node topology.
void os::numa_make_global(char *addr, size_t bytes) {
}

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

bool os::numa_topology_changed() {
  return false;
}

// Exactly one (pseudo-)NUMA group.
size_t os::numa_get_groups_num() {
  return 1;
}

// All threads report group id 0.
int os::numa_get_group_id() {
  return 0;
}
2319
2320size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2321  if (size > 0) {
2322    ids[0] = 0;
2323    return 1;
2324  }
2325  return 0;
2326}
2327
// Page-info queries are unsupported on AIX: report nothing known.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// No page scanning support: report the whole range as scanned.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2335
2336// Reserves and attaches a shared memory segment.
2337// Will assert if a wish address is given and could not be obtained.
2338char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2339
2340  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2341  // thereby clobbering old mappings at that place. That is probably
2342  // not intended, never used and almost certainly an error were it
2343  // ever be used this way (to try attaching at a specified address
2344  // without clobbering old mappings an alternate API exists,
2345  // os::attempt_reserve_memory_at()).
2346  // Instead of mimicking the dangerous coding of the other platforms, here I
2347  // just ignore the request address (release) or assert(debug).
2348  assert0(requested_addr == NULL);
2349
2350  // Always round to os::vm_page_size(), which may be larger than 4K.
2351  bytes = align_up(bytes, os::vm_page_size());
2352  const size_t alignment_hint0 =
2353    alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2354
2355  // In 4K mode always use mmap.
2356  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2357  if (os::vm_page_size() == 4*K) {
2358    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2359  } else {
2360    if (bytes >= Use64KPagesThreshold) {
2361      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2362    } else {
2363      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2364    }
2365  }
2366}
2367
// Release a previously reserved range (or part of one). Dispatches on the
// reservation type recorded in the bookkeeping list; removes the bookkeeping
// entry when the whole reservation is released.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_up(size, os::vm_page_size());
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still use
    //   page table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
    // NOTE(review): this check is #ifdef ASSERT-guarded here but unguarded in
    // the shmat branch above - presumably an oversight; confirm intent.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2409
2410static bool checked_mprotect(char* addr, size_t size, int prot) {
2411
2412  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2413  // not tell me if protection failed when trying to protect an un-protectable range.
2414  //
2415  // This means if the memory was allocated using shmget/shmat, protection wont work
2416  // but mprotect will still return 0:
2417  //
2418  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2419
2420  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2421
2422  if (!rc) {
2423    const char* const s_errno = os::errno_name(errno);
2424    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2425    return false;
2426  }
2427
2428  // mprotect success check
2429  //
2430  // Mprotect said it changed the protection but can I believe it?
2431  //
2432  // To be sure I need to check the protection afterwards. Try to
2433  // read from protected memory and check whether that causes a segfault.
2434  //
2435  if (!os::Aix::xpg_sus_mode()) {
2436
2437    if (CanUseSafeFetch32()) {
2438
2439      const bool read_protected =
2440        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2441         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2442
2443      if (prot & PROT_READ) {
2444        rc = !read_protected;
2445      } else {
2446        rc = read_protected;
2447      }
2448
2449      if (!rc) {
2450        if (os::Aix::on_pase()) {
2451          // There is an issue on older PASE systems where mprotect() will return success but the
2452          // memory will not be protected.
2453          // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2454          // machines; we only see it rarely, when using mprotect() to protect the guard page of
2455          // a stack. It is an OS error.
2456          //
2457          // A valid strategy is just to try again. This usually works. :-/
2458
2459          ::usleep(1000);
2460          if (::mprotect(addr, size, prot) == 0) {
2461            const bool read_protected_2 =
2462              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2463              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2464            rc = true;
2465          }
2466        }
2467      }
2468    }
2469  }
2470
2471  assert(rc == true, "mprotect failed.");
2472
2473  return rc;
2474}
2475
2476// Set protections specified
2477bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2478  unsigned int p = 0;
2479  switch (prot) {
2480  case MEM_PROT_NONE: p = PROT_NONE; break;
2481  case MEM_PROT_READ: p = PROT_READ; break;
2482  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2483  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2484  default:
2485    ShouldNotReachHere();
2486  }
2487  // is_committed is unused.
2488  return checked_mprotect(addr, size, p);
2489}
2490
// Make the range completely inaccessible (used e.g. for guard pages).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}

// Lift guard protection: restore full read/write/execute access.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2498
// Large page support
//
// AIX implements 64K paging through the normal reservation paths, so the
// shared "huge page" machinery below is deliberately inert: the size stays 0
// and the special reserve/release entry points must not be called.

static size_t _large_page_size = 0;

// Enable large page support if OS allows that.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}

char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}

bool os::release_memory_special(char* base, size_t bytes) {
  // Detaching the SHM segment will also delete it, see reserve_memory_special().
  Unimplemented();
  return false;
}

// Always 0 on AIX (see above).
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}

bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2535
2536// Reserve memory at an arbitrary address, only if that area is
2537// available (and not reserved for something else).
2538char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2539  char* addr = NULL;
2540
2541  // Always round to os::vm_page_size(), which may be larger than 4K.
2542  bytes = align_up(bytes, os::vm_page_size());
2543
2544  // In 4K mode always use mmap.
2545  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2546  if (os::vm_page_size() == 4*K) {
2547    return reserve_mmaped_memory(bytes, requested_addr, 0);
2548  } else {
2549    if (bytes >= Use64KPagesThreshold) {
2550      return reserve_shmated_memory(bytes, requested_addr, 0);
2551    } else {
2552      return reserve_mmaped_memory(bytes, requested_addr, 0);
2553    }
2554  }
2555
2556  return addr;
2557}
2558
// Thin wrapper around the libc read(2) call.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}

// Positioned read via pread(2); does not move the file offset.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}
2566
2567void os::naked_short_sleep(jlong ms) {
2568  struct timespec req;
2569
2570  assert(ms < 1000, "Un-interruptable sleep, short time use only");
2571  req.tv_sec = 0;
2572  if (ms > 0) {
2573    req.tv_nsec = (ms % 1000) * 1000000;
2574  }
2575  else {
2576    req.tv_nsec = 1;
2577  }
2578
2579  nanosleep(&req, NULL);
2580
2581  return;
2582}
2583
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}

// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  return DontYieldALot;
}

// Give up the remainder of the current time slice.
void os::naked_yield() {
  sched_yield();
}
2599
2600////////////////////////////////////////////////////////////////////////////////
2601// thread priority support
2602
2603// From AIX manpage to pthread_setschedparam
2604// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2605//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2606//
2607// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2608// range from 40 to 80, where 40 is the least favored priority and 80
2609// is the most favored."
2610//
2611// (Actually, I doubt this even has an impact on AIX, as we do kernel
2612// scheduling there; however, this still leaves iSeries.)
2613//
2614// We use the same values for AIX and PASE.
// Maps Java priorities (index 0 .. CriticalPriority) to native SCHED_OTHER
// priorities; all values lie well inside the 40..80 range documented above.
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2634
2635OSReturn os::set_native_priority(Thread* thread, int newpri) {
2636  if (!UseThreadPriorities) return OS_OK;
2637  pthread_t thr = thread->osthread()->pthread_id();
2638  int policy = SCHED_OTHER;
2639  struct sched_param param;
2640  param.sched_priority = newpri;
2641  int ret = pthread_setschedparam(thr, policy, &param);
2642
2643  if (ret != 0) {
2644    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2645        (int)thr, newpri, ret, os::errno_name(ret));
2646  }
2647  return (ret == 0) ? OS_OK : OS_ERR;
2648}
2649
2650OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2651  if (!UseThreadPriorities) {
2652    *priority_ptr = java_to_os_priority[NormPriority];
2653    return OS_OK;
2654  }
2655  pthread_t thr = thread->osthread()->pthread_id();
2656  int policy = SCHED_OTHER;
2657  struct sched_param param;
2658  int ret = pthread_getschedparam(thr, &policy, &param);
2659  *priority_ptr = param.sched_priority;
2660
2661  return (ret == 0) ? OS_OK : OS_ERR;
2662}
2663
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Intentionally a no-op on this platform.
void os::hint_no_preempt() {}
2667
2668////////////////////////////////////////////////////////////////////////////////
2669// suspend/resume support
2670
2671//  the low-level signal-based suspend/resume support is a remnant from the
2672//  old VM-suspension that used to be for java-suspension, safepoints etc,
2673//  within hotspot. Now there is a single use-case for this:
2674//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2675//      that runs in the watcher thread.
2676//  The remaining code is greatly simplified from the more general suspension
2677//  code that used to be used.
2678//
2679//  The protocol is quite simple:
2680//  - suspend:
2681//      - sends a signal to the target thread
2682//      - polls the suspend state of the osthread using a yield loop
2683//      - target thread signal handler (SR_handler) sets suspend state
2684//        and blocks in sigsuspend until continued
2685//  - resume:
2686//      - sets target osthread state to continue
2687//      - sends signal to end the sigsuspend loop in the SR_handler
2688//
2689//  Note that the SR_lock plays no role in this suspend/resume protocol,
2690//  but is checked for NULL in SR_handler as a thread termination indicator.
2691//
2692
2693static void resume_clear_context(OSThread *osthread) {
2694  osthread->set_ucontext(NULL);
2695  osthread->set_siginfo(NULL);
2696}
2697
2698static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2699  osthread->set_ucontext(context);
2700  osthread->set_siginfo(siginfo);
2701}
2702
2703//
2704// Handler function invoked when a thread's execution is suspended or
2705// resumed. We have to be careful that only async-safe functions are
2706// called here (Note: most pthread functions are not async safe and
2707// should be avoided.)
2708//
2709// Note: sigwait() is a more natural fit than sigsuspend() from an
2710// interface point of view, but sigwait() prevents the signal hander
2711// from being run. libpthread would get very confused by not having
2712// its signal handlers run and prevents sigwait()'s use with the
2713// mutex granting granting signal.
2714//
2715// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2716//
// Async-signal handler for SR_signum. Depending on the osthread's
// SuspendResume state it either parks the calling thread in sigsuspend()
// until resumed, or treats the signal as a cancelled request / wakeup.
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current_or_null_safe();
  assert(thread != NULL, "Missing current thread in SR_handler");

  // On some systems we have seen signal delivery get "stuck" until the signal
  // mask is changed as part of thread termination. Check that the current thread
  // has not already terminated (via SR_lock()) - else the following assertion
  // will fail because the thread is no longer a JavaThread as the ~JavaThread
  // destructor has completed.

  if (thread->SR_lock() == NULL) {
    return;
  }

  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  OSThread* osthread = thread->osthread();

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    // A suspend was requested: save the context, acknowledge the suspension,
    // then block here until do_resume() moves us back to SR_RUNNING.
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // sigsuspend may also return for signals unrelated to resume; only
        // leave the loop once the state transition to SR_RUNNING succeeded.
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2779
// One-time setup of the suspend/resume machinery: picks SR_signum (user
// overridable via the _JAVA_SR_SIGNUM environment variable), builds
// SR_sigset and installs SR_handler. Returns 0 on success, -1 if the
// sigaction() call failed.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): the three-argument SR_handler is stored through sa_handler
  // with a cast even though SA_SIGINFO is set; this relies on sa_handler and
  // sa_sigaction sharing storage in struct sigaction - confirm for AIX.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  // The handler runs with the thread's currently-blocked set as its mask.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2816
// Counterpart to SR_initialize(). There is currently nothing to tear down,
// so this always reports success (0).
static int SR_finalize() {
  return 0;
}
2820
2821static int sr_notify(OSThread* osthread) {
2822  int status = pthread_kill(osthread->pthread_id(), SR_signum);
2823  assert_status(status == 0, status, "pthread_kill");
2824  return status;
2825}
2826
2827// "Randomly" selected value for how long we want to spin
2828// before bailing out on suspending a thread, also how often
2829// we send a signal to a thread we want to resume
2830static const int RANDOMLY_LARGE_INTEGER = 1000000;
2831static const int RANDOMLY_LARGE_INTEGER2 = 100;
2832
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
// Transitions the target osthread RUNNING -> SUSPEND_REQUEST, signals it,
// and spins until SR_handler acknowledges with SR_SUSPENDED (or the
// request times out and is cancelled).
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  // Outer loop counts toward the timeout; the inner loop yields to give the
  // target thread a chance to run its SR_handler.
  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2885
// Transitions the target osthread to SR_WAKEUP_REQUEST and repeatedly
// signals it until SR_handler leaves its sigsuspend() loop and the state
// reaches SR_RUNNING.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  // Re-send the signal each time the yield loops run out, in case an
  // earlier delivery was missed.
  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2909
2910///////////////////////////////////////////////////////////////////////////////////
2911// signal handling (except suspend/resume)
2912
2913// This routine may be used by user applications as a "hook" to catch signals.
2914// The user-defined signal handler must pass unrecognized signals to this
2915// routine, and if it returns true (non-zero), then the signal handler must
2916// return immediately. If the flag "abort_if_unrecognized" is true, then this
2917// routine will never retun false (zero), but instead will execute a VM panic
2918// routine kill the process.
2919//
2920// If this routine returns false, it is OK to call it again. This allows
2921// the user-defined signal handler to perform checks either before or after
2922// the VM performs its own checks. Naturally, the user code would be making
2923// a serious error if it tried to handle an exception (such as a null check
2924// or breakpoint) that the VM was generating for its own correct operation.
2925//
2926// This routine may recognize any of the following kinds of signals:
2927//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2928// It should be consulted by handlers for any of those signals.
2929//
2930// The caller of this routine must pass in the three arguments supplied
2931// to the function referred to in the "sa_sigaction" (not the "sa_handler")
2932// field of the structure passed to sigaction(). This routine assumes that
2933// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2934//
2935// Note that the VM will print warnings if it detects conflicting signal
2936// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2937//
2938extern "C" JNIEXPORT int
2939JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2940
// Set thread signal mask. (For some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same.)
// Returns true on success, false otherwise.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly for the error case:
  // pthread_sigmask returns the error number, sigthreadmask returns -1 and
  // sets the global errno (so pthread_sigmask is more thread-safe for error
  // handling). But success is always 0.
  return rc == 0;
}
2952
2953// Function to unblock all signals which are, according
2954// to POSIX, typical program error signals. If they happen while being blocked,
2955// they typically will bring down the process immediately.
2956bool unblock_program_error_signals() {
2957  sigset_t set;
2958  ::sigemptyset(&set);
2959  ::sigaddset(&set, SIGILL);
2960  ::sigaddset(&set, SIGBUS);
2961  ::sigaddset(&set, SIGFPE);
2962  ::sigaddset(&set, SIGSEGV);
2963  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2964}
2965
2966// Renamed from 'signalHandler' to avoid collision with other shared libs.
2967void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2968  assert(info != NULL && uc != NULL, "it must be old kernel");
2969
2970  // Never leave program error signals blocked;
2971  // on all our platforms they would bring down the process immediately when
2972  // getting raised while being blocked.
2973  unblock_program_error_signals();
2974
2975  int orig_errno = errno;  // Preserve errno value over signal handler.
2976  JVM_handle_aix_signal(sig, info, uc, true);
2977  errno = orig_errno;
2978}
2979
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// Pre-installed third-party handlers, saved per signal number, and the set
// of signals for which an entry has been recorded.
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
// Lookup hook resolved from libjsig (JVM_get_signal_action), if present.
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
2990
2991struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2992  struct sigaction *actp = NULL;
2993
2994  if (libjsig_is_loaded) {
2995    // Retrieve the old signal handler from libjsig
2996    actp = (*get_signal_action)(sig);
2997  }
2998  if (actp == NULL) {
2999    // Retrieve the preinstalled signal handler from jvm
3000    actp = get_preinstalled_handler(sig);
3001  }
3002
3003  return actp;
3004}
3005
// Invokes the chained (pre-existing) handler described by 'actp' for 'sig',
// honoring its SA_NODEFER / SA_SIGINFO / SA_RESETHAND flags and sa_mask.
// Returns true if the signal is considered handled, false if the disposition
// was SIG_DFL (so the VM should treat it as an unexpected exception).
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // SA_RESETHAND: the handler is one-shot; emulate the reset.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  // (SIG_IGN also lands here: an ignored signal counts as handled.)
  return true;
}
3050
3051bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3052  bool chained = false;
3053  // signal-chaining
3054  if (UseSignalChaining) {
3055    struct sigaction *actp = get_chained_signal_action(sig);
3056    if (actp != NULL) {
3057      chained = call_chained_handler(actp, sig, siginfo, context);
3058    }
3059  }
3060  return chained;
3061}
3062
3063size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
3064  // Creating guard page is very expensive. Java thread has HotSpot
3065  // guard pages, only enable glibc guard page for non-Java threads.
3066  // (Remember: compiler thread is a Java thread, too!)
3067  //
3068  // Aix can have different page sizes for stack (4K) and heap (64K).
3069  // As Hotspot knows only one page size, we assume the stack has
3070  // the same page size as the heap. Returning page_size() here can
3071  // cause 16 guard pages which we want to avoid.  Thus we return 4K
3072  // which will be rounded to the real page size by the OS.
3073  return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
3074}
3075
3076struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3077  if (sigismember(&sigs, sig)) {
3078    return &sigact[sig];
3079  }
3080  return NULL;
3081}
3082
// Records a pre-existing handler for 'sig' so that chained_handler() can
// forward to it later. Stores the sigaction first, then marks the signal
// as saved in 'sigs'.
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}
3088
// for diagnostic
// Per-signal copy of the sa_flags the VM installed; consulted by the
// signal checker and by print_signal_handler().
int sigflags[NSIG];
3091
// Returns the sa_flags recorded for 'sig' via set_our_sigflags().
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}
3096
3097void os::Aix::set_our_sigflags(int sig, int flags) {
3098  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3099  if (sig > 0 && sig < NSIG) {
3100    sigflags[sig] = flags;
3101  }
3102}
3103
// Installs (set_installed == true) or restores the default disposition for
// 'sig'. Detects a pre-existing foreign handler: with UseSignalChaining it
// is saved for later forwarding; with AllowUserSignalHandlers the foreign
// handler is left in place; otherwise the VM aborts.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  // Build the new disposition: default action, or the VM's handler with
  // SA_SIGINFO. All signals are blocked while the handler runs (sigfillset).
  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Verify nobody raced us and installed a different handler between the
  // probe above and the sigaction() call.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3150
3151// install signal handlers for signals that HotSpot needs to
3152// handle in order to support Java-level exception handling.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    // Probe for libjsig's interposition hooks; if present, bracket our
    // handler installation with its begin/end notifications.
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    // Install the VM's handler for every signal it needs for Java-level
    // exception handling.
    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3207
3208static const char* get_signal_handler_name(address handler,
3209                                           char* buf, int buflen) {
3210  int offset;
3211  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3212  if (found) {
3213    // skip directory names
3214    const char *p1, *p2;
3215    p1 = buf;
3216    size_t len = strlen(os::file_separator());
3217    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3218    // The way os::dll_address_to_library_name is implemented on Aix
3219    // right now, it always returns -1 for the offset which is not
3220    // terribly informative.
3221    // Will fix that. For now, omit the offset.
3222    jio_snprintf(buf, buflen, "%s", p1);
3223  } else {
3224    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3225  }
3226  return buf;
3227}
3228
// Prints one line describing the currently-installed handler for 'sig':
// handler name/address, sa_mask, sa_flags, and a warning if the flags of a
// VM-owned handler were changed behind the VM's back.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  // Maybe the handler was reset by VMError? If so, report the original
  // handler/flags that VMError recorded.
  address rh = VMError::get_resetted_sighandler(sig);
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3275
// Runs check_signal_handler(sig) unless this signal has already been flagged
// as checked (i.e. is a member of check_signal_done).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3279
3280// This method is a periodic task to check for misbehaving JNI applications
3281// under CheckJNI, we can add any periodic checks here
3282
// Periodic verification (under CheckJNICalls) that the VM's signal handlers
// have not been replaced by user/JNI code; no-op once check_signals is off.
void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
}
3312
// Signature of the C library's sigaction(), looked up via dlsym so the check
// below bypasses any interposed (e.g. libjsig) version.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Lazily-resolved pointer to the real sigaction(); NULL until first use.
static os_sigaction_t os_sigaction = NULL;
3316
// Verifies that the handler currently installed for 'sig' is still the one
// the VM expects (javaSignalHandler, user_handler or SR_handler, depending
// on the signal) and that its sa_flags are unchanged; prints a warning and
// stops checking that signal otherwise.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      // Signals the VM does not own are not checked.
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    // Handler matches but its flags were altered.
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3388
// To install functions for atexit system call
extern "C" {
  // Plain C wrapper so the C++ perfMemory_exit() can be registered via atexit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3395
3396// This is called _before_ the most of global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = 64*K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = 4*K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = 4*K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = 64*K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  // Clock ticks per second, needed to convert times(2)-style results.
  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();

  os::Posix::init();
}
3511
3512// This is called _after_ the global arguments have been parsed.
3513jint os::init_2(void) {
3514
3515  os::Posix::init_2();
3516
3517  if (os::Aix::on_pase()) {
3518    trcVerbose("Running on PASE.");
3519  } else {
3520    trcVerbose("Running on AIX (not PASE).");
3521  }
3522
3523  trcVerbose("processor count: %d", os::_processor_count);
3524  trcVerbose("physical memory: %lu", Aix::_physical_memory);
3525
3526  // Initially build up the loaded dll map.
3527  LoadedLibraries::reload();
3528  if (Verbose) {
3529    trcVerbose("Loaded Libraries: ");
3530    LoadedLibraries::print(tty);
3531  }
3532
3533  const int page_size = Aix::page_size();
3534  const int map_size = page_size;
3535
3536  address map_address = (address) MAP_FAILED;
3537  const int prot  = PROT_READ;
3538  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3539
3540  // Use optimized addresses for the polling page,
3541  // e.g. map it to a special 32-bit address.
3542  if (OptimizePollingPageLocation) {
3543    // architecture-specific list of address wishes:
3544    address address_wishes[] = {
3545      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3546      // PPC64: all address wishes are non-negative 32 bit values where
3547      // the lower 16 bits are all zero. we can load these addresses
3548      // with a single ppc_lis instruction.
3549      (address) 0x30000000, (address) 0x31000000,
3550      (address) 0x32000000, (address) 0x33000000,
3551      (address) 0x40000000, (address) 0x41000000,
3552      (address) 0x42000000, (address) 0x43000000,
3553      (address) 0x50000000, (address) 0x51000000,
3554      (address) 0x52000000, (address) 0x53000000,
3555      (address) 0x60000000, (address) 0x61000000,
3556      (address) 0x62000000, (address) 0x63000000
3557    };
3558    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3559
3560    // iterate over the list of address wishes:
3561    for (int i=0; i<address_wishes_length; i++) {
3562      // Try to map with current address wish.
3563      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3564      // fail if the address is already mapped.
3565      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3566                                     map_size, prot,
3567                                     flags | MAP_FIXED,
3568                                     -1, 0);
3569      trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3570                   address_wishes[i], map_address + (ssize_t)page_size);
3571
3572      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3573        // Map succeeded and map_address is at wished address, exit loop.
3574        break;
3575      }
3576
3577      if (map_address != (address) MAP_FAILED) {
3578        // Map succeeded, but polling_page is not at wished address, unmap and continue.
3579        ::munmap(map_address, map_size);
3580        map_address = (address) MAP_FAILED;
3581      }
3582      // Map failed, continue loop.
3583    }
3584  } // end OptimizePollingPageLocation
3585
3586  if (map_address == (address) MAP_FAILED) {
3587    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3588  }
3589  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3590  os::set_polling_page(map_address);
3591
3592  if (!UseMembar) {
3593    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3594    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3595    os::set_memory_serialize_page(mem_serialize_page);
3596
3597    trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3598        mem_serialize_page, mem_serialize_page + Aix::page_size(),
3599        Aix::page_size(), Aix::page_size());
3600  }
3601
3602  // initialize suspend/resume support - must do this before signal_sets_init()
3603  if (SR_initialize() != 0) {
3604    perror("SR_initialize failed");
3605    return JNI_ERR;
3606  }
3607
3608  Aix::signal_sets_init();
3609  Aix::install_signal_handlers();
3610
3611  // Check and sets minimum stack sizes against command line options
3612  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
3613    return JNI_ERR;
3614  }
3615
3616  if (UseNUMA) {
3617    UseNUMA = false;
3618    warning("NUMA optimizations are not available on this OS.");
3619  }
3620
3621  if (MaxFDLimit) {
3622    // Set the number of file descriptors to max. print out error
3623    // if getrlimit/setrlimit fails but continue regardless.
3624    struct rlimit nbr_files;
3625    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3626    if (status != 0) {
3627      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3628    } else {
3629      nbr_files.rlim_cur = nbr_files.rlim_max;
3630      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3631      if (status != 0) {
3632        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3633      }
3634    }
3635  }
3636
3637  if (PerfAllowAtExitRegistration) {
3638    // Only register atexit functions if PerfAllowAtExitRegistration is set.
3639    // At exit functions can be delayed until process exit time, which
3640    // can be problematic for embedded VM situations. Embedded VMs should
3641    // call DestroyJavaVM() to assure that VM resources are released.
3642
3643    // Note: perfMemory_exit_helper atexit function may be removed in
3644    // the future if the appropriate cleanup code can be added to the
3645    // VM_Exit VMOperation's doit method.
3646    if (atexit(perfMemory_exit_helper) != 0) {
3647      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3648    }
3649  }
3650
3651  return JNI_OK;
3652}
3653
3654// Mark the polling page as unreadable
3655void os::make_polling_page_unreadable(void) {
3656  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3657    fatal("Could not disable polling page");
3658  }
3659};
3660
3661// Mark the polling page as readable
3662void os::make_polling_page_readable(void) {
3663  // Changed according to os_linux.cpp.
3664  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3665    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3666  }
3667};
3668
3669int os::active_processor_count() {
3670  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3671  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3672  return online_cpus;
3673}
3674
3675void os::set_native_thread_name(const char *name) {
3676  // Not yet implemented.
3677  return;
3678}
3679
3680bool os::distribute_processes(uint length, uint* distribution) {
3681  // Not yet implemented.
3682  return false;
3683}
3684
3685bool os::bind_to_processor(uint processor_id) {
3686  // Not yet implemented.
3687  return false;
3688}
3689
3690void os::SuspendedThreadTask::internal_do_task() {
3691  if (do_suspend(_thread->osthread())) {
3692    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3693    do_task(context);
3694    do_resume(_thread->osthread());
3695  }
3696}
3697
// Helper task that samples the program counter of a suspended thread
// via the SuspendedThreadTask suspend/resume protocol.
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // Returns the sampled PC; only valid after the task has run.
  ExtendedPC result();
protected:
  // Invoked while the target thread is suspended; captures the PC.
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  // PC captured from the suspended thread's ucontext.
  ExtendedPC _epc;
};
3707
// Returns the program counter captured by do_task().
ExtendedPC PcFetcher::result() {
  // _epc is only set once the task has completed.
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}
3712
3713void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3714  Thread* thread = context.thread();
3715  OSThread* osthread = thread->osthread();
3716  if (osthread->ucontext() != NULL) {
3717    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
3718  } else {
3719    // NULL context is unexpected, double-check this is the VMThread.
3720    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3721  }
3722}
3723
3724// Suspends the target using the signal mechanism and then grabs the PC before
3725// resuming the target. Used by the flat-profiler only
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher for the VMThread.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  // Suspend the VMThread, sample its PC from the saved context, resume it.
  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}
3735
3736////////////////////////////////////////////////////////////////////////////////
3737// debug support
3738
3739bool os::find(address addr, outputStream* st) {
3740
3741  st->print(PTR_FORMAT ": ", addr);
3742
3743  loaded_module_t lm;
3744  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3745      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3746    st->print_cr("%s", lm.path);
3747    return true;
3748  }
3749
3750  return false;
3751}
3752
3753////////////////////////////////////////////////////////////////////////////////
3754// misc
3755
3756// This does not do anything on Aix. This is basically a hook for being
3757// able to use structured exception handling (thread-local exception filters)
3758// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, Thread* thread) {
  // Plain pass-through call; no structured exception handling on AIX.
  f(value, method, args, thread);
}
3764
// No OS-specific statistics to print on AIX; intentionally empty.
void os::print_statistics() {
}
3767
3768bool os::message_box(const char* title, const char* message) {
3769  int i;
3770  fdStream err(defaultStream::error_fd());
3771  for (i = 0; i < 78; i++) err.print_raw("=");
3772  err.cr();
3773  err.print_raw_cr(title);
3774  for (i = 0; i < 78; i++) err.print_raw("-");
3775  err.cr();
3776  err.print_raw_cr(message);
3777  for (i = 0; i < 78; i++) err.print_raw("=");
3778  err.cr();
3779
3780  char buf[16];
3781  // Prevent process from exiting upon "read error" without consuming all CPU
3782  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3783
3784  return buf[0] == 'y' || buf[0] == 'Y';
3785}
3786
3787int os::stat(const char *path, struct stat *sbuf) {
3788  char pathbuf[MAX_PATH];
3789  if (strlen(path) > MAX_PATH - 1) {
3790    errno = ENAMETOOLONG;
3791    return -1;
3792  }
3793  os::native_path(strcpy(pathbuf, path));
3794  return ::stat(pathbuf, sbuf);
3795}
3796
3797// Is a (classpath) directory empty?
3798bool os::dir_is_empty(const char* path) {
3799  DIR *dir = NULL;
3800  struct dirent *ptr;
3801
3802  dir = opendir(path);
3803  if (dir == NULL) return true;
3804
3805  /* Scan the directory */
3806  bool result = true;
3807  char buf[sizeof(struct dirent) + MAX_PATH];
3808  while (result && (ptr = ::readdir(dir)) != NULL) {
3809    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3810      result = false;
3811    }
3812  }
3813  closedir(dir);
3814  return result;
3815}
3816
3817// This code originates from JDK's sysOpen and open64_w
3818// from src/solaris/hpi/src/system_md.c
3819
// Open a file with open64(), rejecting over-long paths and directories,
// and set FD_CLOEXEC on the result. Returns the fd, or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {

  // Reject over-long paths up front with the conventional errno.
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  int fd;

  fd = ::open64(path, oflag, mode);
  if (fd == -1) return -1;

  // If the open succeeded, the file might still be a directory.
  {
    struct stat64 buf64;
    int ret = ::fstat64(fd, &buf64);
    int st_mode = buf64.st_mode;

    if (ret != -1) {
      if ((st_mode & S_IFMT) == S_IFDIR) {
        // Opening a directory for reading is not an error on AIX, but
        // callers expect EISDIR semantics - close and fail explicitly.
        errno = EISDIR;
        ::close(fd);
        return -1;
      }
    } else {
      ::close(fd);
      return -1;
    }
  }

  // All file descriptors that are opened in the JVM and not
  // specifically destined for a subprocess should have the
  // close-on-exec flag set. If we don't set it, then careless 3rd
  // party native code might fork and exec without closing all
  // appropriate file descriptors (e.g. as we do in closeDescriptors in
  // UNIXProcess.c), and this in turn might:
  //
  // - cause end-of-file to fail to be detected on some file
  //   descriptors, resulting in mysterious hangs, or
  //
  // - might cause an fopen in the subprocess to fail on a system
  //   suffering from bug 1085341.
  //
  // (Yes, the default setting of the close-on-exec flag is a Unix
  // design flaw.)
  //
  // See:
  // 1085341: 32-bit stdio routines should support file descriptors >255
  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
#ifdef FD_CLOEXEC
  {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1)
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  }
#endif

  return fd;
}
3879
3880// create binary file, rewriting existing file if required
3881int os::create_binary_file(const char* path, bool rewrite_existing) {
3882  int oflags = O_WRONLY | O_CREAT;
3883  if (!rewrite_existing) {
3884    oflags |= O_EXCL;
3885  }
3886  return ::open64(path, oflags, S_IREAD | S_IWRITE);
3887}
3888
3889// return current position of file pointer
jlong os::current_file_offset(int fd) {
  // lseek64 with offset 0 / SEEK_CUR just reports the current position.
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}
3893
3894// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  // Returns the resulting absolute offset, or -1 on error.
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}
3898
3899// This code originates from JDK's sysAvailable
3900// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3901
// Number of bytes available for reading on fd without blocking; stored
// in *bytes. Returns 1 on success, 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n;
      // For character devices, pipes and sockets, FIONREAD reports the
      // number of immediately readable bytes.
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Regular files: available = size - current position, determined by
  // seeking to the end and restoring the original position afterwards.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
3927
3928// Map a block of memory.
3929char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3930                        char *addr, size_t bytes, bool read_only,
3931                        bool allow_exec) {
3932  int prot;
3933  int flags = MAP_PRIVATE;
3934
3935  if (read_only) {
3936    prot = PROT_READ;
3937    flags = MAP_SHARED;
3938  } else {
3939    prot = PROT_READ | PROT_WRITE;
3940    flags = MAP_PRIVATE;
3941  }
3942
3943  if (allow_exec) {
3944    prot |= PROT_EXEC;
3945  }
3946
3947  if (addr != NULL) {
3948    flags |= MAP_FIXED;
3949  }
3950
3951  // Allow anonymous mappings if 'fd' is -1.
3952  if (fd == -1) {
3953    flags |= MAP_ANONYMOUS;
3954  }
3955
3956  char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3957                                     fd, file_offset);
3958  if (mapped_address == MAP_FAILED) {
3959    return NULL;
3960  }
3961  return mapped_address;
3962}
3963
3964// Remap a block of memory.
// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}
3972
3973// Unmap a block of memory.
3974bool os::pd_unmap_memory(char* addr, size_t bytes) {
3975  return munmap(addr, bytes) == 0;
3976}
3977
3978// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3979// are used by JVM M&M and JVMTI to get user+sys or user CPU time
3980// of a thread.
3981//
3982// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3983// the fast estimate available on the platform.
3984
jlong os::current_thread_cpu_time() {
  // Fast estimate for the calling thread.
  // return user + sys since the cost is the same
  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}
3991
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}
3998
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  // Delegates to the per-thread variant for the calling thread.
  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  assert(n >= 0, "negative CPU time");
  return n;
}
4004
4005static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4006  bool error = false;
4007
4008  jlong sys_time = 0;
4009  jlong user_time = 0;
4010
4011  // Reimplemented using getthrds64().
4012  //
4013  // Works like this:
4014  // For the thread in question, get the kernel thread id. Then get the
4015  // kernel thread statistics using that id.
4016  //
4017  // This only works of course when no pthread scheduling is used,
4018  // i.e. there is a 1:1 relationship to kernel threads.
4019  // On AIX, see AIXTHREAD_SCOPE variable.
4020
4021  pthread_t pthtid = thread->osthread()->pthread_id();
4022
4023  // retrieve kernel thread id for the pthread:
4024  tid64_t tid = 0;
4025  struct __pthrdsinfo pinfo;
4026  // I just love those otherworldly IBM APIs which force me to hand down
4027  // dummy buffers for stuff I dont care for...
4028  char dummy[1];
4029  int dummy_size = sizeof(dummy);
4030  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4031                          dummy, &dummy_size) == 0) {
4032    tid = pinfo.__pi_tid;
4033  } else {
4034    tty->print_cr("pthread_getthrds_np failed.");
4035    error = true;
4036  }
4037
4038  // retrieve kernel timing info for that kernel thread
4039  if (!error) {
4040    struct thrdentry64 thrdentry;
4041    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4042      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4043      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4044    } else {
4045      tty->print_cr("pthread_getthrds_np failed.");
4046      error = true;
4047    }
4048  }
4049
4050  if (p_sys_time) {
4051    *p_sys_time = sys_time;
4052  }
4053
4054  if (p_user_time) {
4055    *p_user_time = user_time;
4056  }
4057
4058  if (error) {
4059    return false;
4060  }
4061
4062  return true;
4063}
4064
4065jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4066  jlong sys_time;
4067  jlong user_time;
4068
4069  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4070    return -1;
4071  }
4072
4073  return user_sys_cpu_time ? sys_time + user_time : user_time;
4074}
4075
// Describe for JVMTI the timer behind current_thread_cpu_time().
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4082
// Describe for JVMTI the timer behind thread_cpu_time().
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4089
bool os::is_thread_cpu_time_supported() {
  // The getthrds64-based implementation above is always available.
  return true;
}
4093
4094// System loadavg support. Returns -1 if load average cannot be obtained.
4095// For now just return the system wide load average (no processor sets).
4096int os::loadavg(double values[], int nelem) {
4097
4098  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4099  guarantee(values, "argument error");
4100
4101  if (os::Aix::on_pase()) {
4102
4103    // AS/400 PASE: use libo4 porting library
4104    double v[3] = { 0.0, 0.0, 0.0 };
4105
4106    if (libo4::get_load_avg(v, v + 1, v + 2)) {
4107      for (int i = 0; i < nelem; i ++) {
4108        values[i] = v[i];
4109      }
4110      return nelem;
4111    } else {
4112      return -1;
4113    }
4114
4115  } else {
4116
4117    // AIX: use libperfstat
4118    libperfstat::cpuinfo_t ci;
4119    if (libperfstat::get_cpuinfo(&ci)) {
4120      for (int i = 0; i < nelem; i++) {
4121        values[i] = ci.loadavg[i];
4122      }
4123    } else {
4124      return -1;
4125    }
4126    return nelem;
4127  }
4128}
4129
4130void os::pause() {
4131  char filename[MAX_PATH];
4132  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4133    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4134  } else {
4135    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4136  }
4137
4138  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4139  if (fd != -1) {
4140    struct stat buf;
4141    ::close(fd);
4142    while (::stat(filename, &buf) == 0) {
4143      (void)::poll(NULL, 0, 100);
4144    }
4145  } else {
4146    trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4147  }
4148}
4149
4150bool os::Aix::is_primordial_thread() {
4151  if (pthread_self() == (pthread_t)1) {
4152    return true;
4153  } else {
4154    return false;
4155  }
4156}
4157
// OS recognition (PASE/AIX, OS level). Call this before calling any of the
// static queries Aix::on_pase() or Aix::os_version().
4160void os::Aix::initialize_os_info() {
4161
4162  assert(_on_pase == -1 && _os_version == 0, "already called.");
4163
4164  struct utsname uts;
4165  memset(&uts, 0, sizeof(uts));
4166  strcpy(uts.sysname, "?");
4167  if (::uname(&uts) == -1) {
4168    trcVerbose("uname failed (%d)", errno);
4169    guarantee(0, "Could not determine whether we run on AIX or PASE");
4170  } else {
4171    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4172               "node \"%s\" machine \"%s\"\n",
4173               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4174    const int major = atoi(uts.version);
4175    assert(major > 0, "invalid OS version");
4176    const int minor = atoi(uts.release);
4177    assert(minor > 0, "invalid OS release");
4178    _os_version = (major << 24) | (minor << 16);
4179    char ver_str[20] = {0};
4180    char *name_str = "unknown OS";
4181    if (strcmp(uts.sysname, "OS400") == 0) {
4182      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4183      _on_pase = 1;
4184      if (os_version_short() < 0x0504) {
4185        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4186        assert(false, "OS/400 release too old.");
4187      }
4188      name_str = "OS/400 (pase)";
4189      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4190    } else if (strcmp(uts.sysname, "AIX") == 0) {
4191      // We run on AIX. We do not support versions older than AIX 5.3.
4192      _on_pase = 0;
4193      // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4194      odmWrapper::determine_os_kernel_version(&_os_version);
4195      if (os_version_short() < 0x0503) {
4196        trcVerbose("AIX release older than AIX 5.3 not supported.");
4197        assert(false, "AIX release too old.");
4198      }
4199      name_str = "AIX";
4200      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4201                   major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4202    } else {
4203      assert(false, name_str);
4204    }
4205    trcVerbose("We run on %s %s", name_str, ver_str);
4206  }
4207
4208  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4209} // end: os::Aix::initialize_os_info()
4210
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
4215void os::Aix::scan_environment() {
4216
4217  char* p;
4218  int rc;
4219
4220  // Warn explicity if EXTSHM=ON is used. That switch changes how
4221  // System V shared memory behaves. One effect is that page size of
4222  // shared memory cannot be change dynamically, effectivly preventing
4223  // large pages from working.
4224  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4225  // recommendation is (in OSS notes) to switch it off.
4226  p = ::getenv("EXTSHM");
4227  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4228  if (p && strcasecmp(p, "ON") == 0) {
4229    _extshm = 1;
4230    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4231    if (!AllowExtshm) {
4232      // We allow under certain conditions the user to continue. However, we want this
4233      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4234      // that the VM is not able to allocate 64k pages for the heap.
4235      // We do not want to run with reduced performance.
4236      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4237    }
4238  } else {
4239    _extshm = 0;
4240  }
4241
4242  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4243  // Not tested, not supported.
4244  //
4245  // Note that it might be worth the trouble to test and to require it, if only to
4246  // get useful return codes for mprotect.
4247  //
4248  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4249  // exec() ? before loading the libjvm ? ....)
4250  p = ::getenv("XPG_SUS_ENV");
4251  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4252  if (p && strcmp(p, "ON") == 0) {
4253    _xpg_sus_mode = 1;
4254    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4255    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4256    // clobber address ranges. If we ever want to support that, we have to do some
4257    // testing first.
4258    guarantee(false, "XPG_SUS_ENV=ON not supported");
4259  } else {
4260    _xpg_sus_mode = 0;
4261  }
4262
4263  if (os::Aix::on_pase()) {
4264    p = ::getenv("QIBM_MULTI_THREADED");
4265    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4266  }
4267
4268  p = ::getenv("LDR_CNTRL");
4269  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4270  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4271    if (p && ::strstr(p, "TEXTPSIZE")) {
4272      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4273        "you may experience hangs or crashes on OS/400 V7R1.");
4274    }
4275  }
4276
4277  p = ::getenv("AIXTHREAD_GUARDPAGES");
4278  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4279
4280} // end: os::Aix::scan_environment()
4281
4282// PASE: initialize the libo4 library (PASE porting library).
4283void os::Aix::initialize_libo4() {
4284  guarantee(os::Aix::on_pase(), "OS/400 only.");
4285  if (!libo4::init()) {
4286    trcVerbose("libo4 initialization failed.");
4287    assert(false, "libo4 initialization failed");
4288  } else {
4289    trcVerbose("libo4 initialized.");
4290  }
4291}
4292
4293// AIX: initialize the libperfstat library.
4294void os::Aix::initialize_libperfstat() {
4295  assert(os::Aix::on_aix(), "AIX only");
4296  if (!libperfstat::init()) {
4297    trcVerbose("libperfstat initialization failed.");
4298    assert(false, "libperfstat initialization failed");
4299  } else {
4300    trcVerbose("libperfstat initialized.");
4301  }
4302}
4303
4304/////////////////////////////////////////////////////////////////////////////
4305// thread stack
4306
4307// Get the current stack base from the OS (actually, the pthread library).
4308// Note: usually not page aligned.
address os::current_stack_base() {
  // Query the pthread library for the current thread's stack bounds.
  AixMisc::stackbounds_t bounds;
  bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
  guarantee(rc, "Unable to retrieve stack bounds.");
  return bounds.base;
}
4315
4316// Get the current stack size from the OS (actually, the pthread library).
4317// Returned size is such that (base - size) is always aligned to page size.
4318size_t os::current_stack_size() {
4319  AixMisc::stackbounds_t bounds;
4320  bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4321  guarantee(rc, "Unable to retrieve stack bounds.");
4322  // Align the returned stack size such that the stack low address
4323  // is aligned to page size (Note: base is usually not and we do not care).
4324  // We need to do this because caller code will assume stack low address is
4325  // page aligned and will place guard pages without checking.
4326  address low = bounds.base - bounds.size;
4327  address low_aligned = (address)align_up(low, os::vm_page_size());
4328  size_t s = bounds.base - low_aligned;
4329  return s;
4330}
4331
4332extern char** environ;
4333
4334// Run the specified command in a separate process. Return its exit value,
4335// or -1 on failure (e.g. can't fork a new process).
4336// Unlike system(), this function can be called from signal handler. It
4337// doesn't block SIGINT et al.
4338int os::fork_and_exec(char* cmd) {
4339  char * argv[4] = {"sh", "-c", cmd, NULL};
4340
4341  pid_t pid = fork();
4342
4343  if (pid < 0) {
4344    // fork failed
4345    return -1;
4346
4347  } else if (pid == 0) {
4348    // child process
4349
4350    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4351    execve("/usr/bin/sh", argv, environ);
4352
4353    // execve failed
4354    _exit(-1);
4355
4356  } else {
4357    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4358    // care about the actual exit code, for now.
4359
4360    int status;
4361
4362    // Wait for the child process to exit. This returns immediately if
4363    // the child has already exited. */
4364    while (waitpid(pid, &status, 0) < 0) {
4365      switch (errno) {
4366        case ECHILD: return 0;
4367        case EINTR: break;
4368        default: return -1;
4369      }
4370    }
4371
4372    if (WIFEXITED(status)) {
4373      // The child exited normally; get its exit code.
4374      return WEXITSTATUS(status);
4375    } else if (WIFSIGNALED(status)) {
4376      // The child exited because of a signal.
4377      // The best value to return is 0x80 + signal number,
4378      // because that is what all Unix shells do, and because
4379      // it allows callers to distinguish between process exit and
4380      // process death by signal.
4381      return 0x80 + WTERMSIG(status);
4382    } else {
4383      // Unknown exit code; pass it through.
4384      return status;
4385    }
4386  }
4387  return -1;
4388}
4389
4390// is_headless_jre()
4391//
4392// Test for the existence of xawt/libmawt.so or libawt_xawt.so
4393// in order to report if we are running in a headless jre.
4394//
4395// Since JDK8 xawt/libmawt.so is moved into the same directory
4396// as libawt.so, and renamed libawt_xawt.so
4397bool os::is_headless_jre() {
4398  struct stat statbuf;
4399  char buf[MAXPATHLEN];
4400  char libmawtpath[MAXPATHLEN];
4401  const char *xawtstr = "/xawt/libmawt.so";
4402  const char *new_xawtstr = "/libawt_xawt.so";
4403
4404  char *p;
4405
4406  // Get path to libjvm.so
4407  os::jvm_path(buf, sizeof(buf));
4408
4409  // Get rid of libjvm.so
4410  p = strrchr(buf, '/');
4411  if (p == NULL) return false;
4412  else *p = '\0';
4413
4414  // Get rid of client or server
4415  p = strrchr(buf, '/');
4416  if (p == NULL) return false;
4417  else *p = '\0';
4418
4419  // check xawt/libmawt.so
4420  strcpy(libmawtpath, buf);
4421  strcat(libmawtpath, xawtstr);
4422  if (::stat(libmawtpath, &statbuf) == 0) return false;
4423
4424  // check libawt_xawt.so
4425  strcpy(libmawtpath, buf);
4426  strcat(libmawtpath, new_xawtstr);
4427  if (::stat(libmawtpath, &statbuf) == 0) return false;
4428
4429  return true;
4430}
4431
4432// Get the default path to the core file
4433// Returns the length of the string
4434int os::get_core_path(char* buffer, size_t bufferSize) {
4435  const char* p = get_current_directory(buffer, bufferSize);
4436
4437  if (p == NULL) {
4438    assert(p != NULL, "failed to get current directory");
4439    return 0;
4440  }
4441
4442  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4443                                               p, current_process_id());
4444
4445  return strlen(buffer);
4446}
4447
#ifndef PRODUCT
// Non-product hook for the VM-internal reserve_memory_special self test;
// intentionally empty on AIX (no tests available for this platform).
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif
4453
4454bool os::start_debugging(char *buf, int buflen) {
4455  int len = (int)strlen(buf);
4456  char *p = &buf[len];
4457
4458  jio_snprintf(p, buflen -len,
4459                 "\n\n"
4460                 "Do you want to debug the problem?\n\n"
4461                 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4462                 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4463                 "Otherwise, press RETURN to abort...",
4464                 os::current_process_id(),
4465                 os::current_thread_id(), thread_self());
4466
4467  bool yes = os::message_box("Unexpected Error", buf);
4468
4469  if (yes) {
4470    // yes, user asked VM to launch debugger
4471    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4472
4473    os::fork_and_exec(buf);
4474    yes = false;
4475  }
4476  return yes;
4477}
4478
4479static inline time_t get_mtime(const char* filename) {
4480  struct stat st;
4481  int ret = os::stat(filename, &st);
4482  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4483  return st.st_mtime;
4484}
4485
4486int os::compare_file_modified_times(const char* file1, const char* file2) {
4487  time_t t1 = get_mtime(file1);
4488  time_t t2 = get_mtime(file2);
4489  return t1 - t2;
4490}
4491