os_aix.cpp revision 13544:61c0ae8bee4e
1/*
2 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26// According to the AIX OS doc #pragma alloca must be used
27// with C++ compiler before referencing the function alloca()
28#pragma alloca
29
30// no precompiled headers
31#include "classfile/classLoader.hpp"
32#include "classfile/systemDictionary.hpp"
33#include "classfile/vmSymbols.hpp"
34#include "code/icBuffer.hpp"
35#include "code/vtableStubs.hpp"
36#include "compiler/compileBroker.hpp"
37#include "interpreter/interpreter.hpp"
38#include "jvm_aix.h"
39#include "logging/log.hpp"
40#include "libo4.hpp"
41#include "libperfstat_aix.hpp"
42#include "libodm_aix.hpp"
43#include "loadlib_aix.hpp"
44#include "memory/allocation.inline.hpp"
45#include "memory/filemap.hpp"
46#include "misc_aix.hpp"
47#include "oops/oop.inline.hpp"
48#include "os_aix.inline.hpp"
49#include "os_share_aix.hpp"
50#include "porting_aix.hpp"
51#include "prims/jniFastGetField.hpp"
52#include "prims/jvm.h"
53#include "prims/jvm_misc.hpp"
54#include "runtime/arguments.hpp"
55#include "runtime/atomic.hpp"
56#include "runtime/extendedPC.hpp"
57#include "runtime/globals.hpp"
58#include "runtime/interfaceSupport.hpp"
59#include "runtime/java.hpp"
60#include "runtime/javaCalls.hpp"
61#include "runtime/mutexLocker.hpp"
62#include "runtime/objectMonitor.hpp"
63#include "runtime/orderAccess.inline.hpp"
64#include "runtime/os.hpp"
65#include "runtime/osThread.hpp"
66#include "runtime/perfMemory.hpp"
67#include "runtime/sharedRuntime.hpp"
68#include "runtime/statSampler.hpp"
69#include "runtime/stubRoutines.hpp"
70#include "runtime/thread.inline.hpp"
71#include "runtime/threadCritical.hpp"
72#include "runtime/timer.hpp"
73#include "runtime/vm_version.hpp"
74#include "services/attachListener.hpp"
75#include "services/runtimeService.hpp"
76#include "utilities/align.hpp"
77#include "utilities/decoder.hpp"
78#include "utilities/defaultStream.hpp"
79#include "utilities/events.hpp"
80#include "utilities/growableArray.hpp"
81#include "utilities/vmError.hpp"
82
83// put OS-includes here (sorted alphabetically)
84#include <errno.h>
85#include <fcntl.h>
86#include <inttypes.h>
87#include <poll.h>
88#include <procinfo.h>
89#include <pthread.h>
90#include <pwd.h>
91#include <semaphore.h>
92#include <signal.h>
93#include <stdint.h>
94#include <stdio.h>
95#include <string.h>
96#include <unistd.h>
97#include <sys/ioctl.h>
98#include <sys/ipc.h>
99#include <sys/mman.h>
100#include <sys/resource.h>
101#include <sys/select.h>
102#include <sys/shm.h>
103#include <sys/socket.h>
104#include <sys/stat.h>
105#include <sys/sysinfo.h>
106#include <sys/systemcfg.h>
107#include <sys/time.h>
108#include <sys/times.h>
109#include <sys/types.h>
110#include <sys/utsname.h>
111#include <sys/vminfo.h>
112#include <sys/wait.h>
113
114// Missing prototypes for various system APIs.
115extern "C"
116int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
117
118#if !defined(_AIXVERSION_610)
119extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
120extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
121extern "C" int getargs   (procsinfo*, int, char*, int);
122#endif
123
124#define MAX_PATH (2 * K)
125
126// for timer info max values which include all bits
127#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
128// for multipage initialization error analysis (in 'g_multipage_error')
129#define ERROR_MP_OS_TOO_OLD                          100
130#define ERROR_MP_EXTSHM_ACTIVE                       101
131#define ERROR_MP_VMGETINFO_FAILED                    102
132#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
133
134static address resolve_function_descriptor_to_code_pointer(address p);
135
136static void vmembk_print_on(outputStream* os);
137
138////////////////////////////////////////////////////////////////////////////////
139// global variables (for a description see os_aix.hpp)
140
// Total physical memory in bytes; filled in by initialize_system_info().
julong    os::Aix::_physical_memory = 0;

// pthread id of the main thread; 0 until recorded during startup.
pthread_t os::Aix::_main_thread = ((pthread_t)0);
// Base page size in bytes; -1 = uninitialized.
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
// (presumably reflects the EXTSHM environment setting - see os_aix.hpp)
int       os::Aix::_extshm = -1;
162
163////////////////////////////////////////////////////////////////////////////////
164// local variables
165
static jlong    initial_time_count = 0;    // Time counter value captured at startup (0 until set).
static int      clock_tics_per_sec = 100;  // Clock ticks per second (default 100).
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true; // Enables the periodic signal-handler sanity checks.
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;                 // Signal set used for suspend/resume handling.

// Process break recorded at startup.
static address g_brk_at_startup = NULL;
175
176// This describes the state of multipage support of the underlying
177// OS. Note that this is of no interest to the outsize world and
178// therefore should not be defined in AIX class.
179//
180// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
181// latter two (16M "large" resp. 16G "huge" pages) require special
182// setup and are normally not available.
183//
184// AIX supports multiple page sizes per process, for:
185//  - Stack (of the primordial thread, so not relevant for us)
186//  - Data - data, bss, heap, for us also pthread stacks
187//  - Text - text code
188//  - shared memory
189//
190// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
191// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
192//
193// For shared memory, page size can be set dynamically via
194// shmctl(). Different shared memory regions can have different page
195// sizes.
196//
197// More information can be found at AIBM info center:
198//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
199//
// Collected multipage state; filled in once by query_multipage_support().
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,                // All page sizes start out as -1 ("uninitialized") ...
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,               // ... no large page capability assumed ...
  0                           // ... and no error yet.
};
218
219// We must not accidentally allocate memory close to the BRK - even if
220// that would work - because then we prevent the BRK segment from
221// growing which may result in a malloc OOM even though there is
222// enough memory. The problem only arises if we shmat() or mmap() at
223// a specific wish address, e.g. to place the heap in a
224// compressed-oops-friendly way.
225static bool is_close_to_brk(address a) {
226  assert0(g_brk_at_startup != NULL);
227  if (a >= g_brk_at_startup &&
228      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
229    return true;
230  }
231  return false;
232}
233
// Amount of currently available memory in bytes; delegates to the
// AIX-specific implementation below.
julong os::available_memory() {
  return Aix::available_memory();
}
237
// Free real memory in bytes.
// On PASE returns 0 without querying (the query would always yield null
// anyway); on AIX returns real_free from get_meminfo(), or ULONG_MAX as a
// "don't know" fallback if the query fails.
julong os::Aix::available_memory() {
  // Avoid expensive API call here, as returned value will always be null.
  if (os::Aix::on_pase()) {
    return 0x0LL;
  }
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return ULONG_MAX;
  }
}
250
// Total physical memory in bytes; delegates to the cached value queried
// by os::Aix::initialize_system_info().
julong os::physical_memory() {
  return Aix::physical_memory();
}
254
255// Return true if user is running as root.
256
257bool os::have_special_privileges() {
258  static bool init = false;
259  static bool privileges = false;
260  if (!init) {
261    privileges = (getuid() != geteuid()) || (getgid() != getegid());
262    init = true;
263  }
264  return privileges;
265}
266
267// Helper function, emulates disclaim64 using multiple 32bit disclaims
268// because we cannot use disclaim64() on AS/400 and old AIX releases.
269static bool my_disclaim64(char* addr, size_t size) {
270
271  if (size == 0) {
272    return true;
273  }
274
275  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
276  const unsigned int maxDisclaimSize = 0x40000000;
277
278  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
279  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
280
281  char* p = addr;
282
283  for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
284    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
285      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
286      return false;
287    }
288    p += maxDisclaimSize;
289  }
290
291  if (lastDisclaimSize > 0) {
292    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
293      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
294      return false;
295    }
296  }
297
298  return true;
299}
300
// Cpu architecture string. AIX runs on PowerPC only; any other target is a
// build error.
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
309
// Wrap the function "vmgetinfo" which is not available on older OS releases.
// Guards against calling it on OS/400 (PASE) older than V6R1, where it does
// not exist; otherwise forwards directly to ::vmgetinfo().
static int checked_vmgetinfo(void *out, int command, int arg) {
  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
  }
  return ::vmgetinfo(out, command, arg);
}
317
// Given an address, returns the size of the page backing that address.
// Falls back to 4K on PASE < V6R1 (no vmgetinfo) or if the VM_PAGE_INFO
// query fails.
size_t os::Aix::query_pagesize(void* addr) {

  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
    return 4*K;
  }

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    assert(false, "vmgetinfo failed to retrieve page size");
    // In product builds the assert is compiled out; report the base 4K size.
    return 4*K;
  }
}
335
// Queries processor count and total physical memory and caches them in
// os::_processor_count and _physical_memory.
void os::Aix::initialize_system_info() {

  // Get the number of online(logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
    // Note: in product builds execution continues; get_meminfo() zeroes the
    // struct on entry, so real_total is 0 in that case.
  }
  _physical_memory = (julong) mi.real_total;
}
349
350// Helper function for tracing page sizes.
351static const char* describe_pagesize(size_t pagesize) {
352  switch (pagesize) {
353    case 4*K : return "4K";
354    case 64*K: return "64K";
355    case 16*M: return "16M";
356    case 16*G: return "16G";
357    default:
358      assert(false, "surprise");
359      return "??";
360  }
361}
362
363// Probe OS for multipage support.
364// Will fill the global g_multipage_support structure.
365// Must be called before calling os::large_page_init().
366static void query_multipage_support() {
367
368  guarantee(g_multipage_support.pagesize == -1,
369            "do not call twice");
370
371  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
372
373  // This really would surprise me.
374  assert(g_multipage_support.pagesize == 4*K, "surprise!");
375
376  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
377  // Default data page size is defined either by linker options (-bdatapsize)
378  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
379  // default should be 4K.
380  {
381    void* p = ::malloc(16*M);
382    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
383    ::free(p);
384  }
385
386  // Query default shm page size (LDR_CNTRL SHMPSIZE).
387  // Note that this is pure curiosity. We do not rely on default page size but set
388  // our own page size after allocated.
389  {
390    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
391    guarantee(shmid != -1, "shmget failed");
392    void* p = ::shmat(shmid, NULL, 0);
393    ::shmctl(shmid, IPC_RMID, NULL);
394    guarantee(p != (void*) -1, "shmat failed");
395    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
396    ::shmdt(p);
397  }
398
399  // Before querying the stack page size, make sure we are not running as primordial
400  // thread (because primordial thread's stack may have different page size than
401  // pthread thread stacks). Running a VM on the primordial thread won't work for a
402  // number of reasons so we may just as well guarantee it here.
403  guarantee0(!os::Aix::is_primordial_thread());
404
405  // Query pthread stack page size. Should be the same as data page size because
406  // pthread stacks are allocated from C-Heap.
407  {
408    int dummy = 0;
409    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
410  }
411
412  // Query default text page size (LDR_CNTRL TEXTPSIZE).
413  {
414    address any_function =
415      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
416    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
417  }
418
419  // Now probe for support of 64K pages and 16M pages.
420
421  // Before OS/400 V6R1, there is no support for pages other than 4K.
422  if (os::Aix::on_pase_V5R4_or_older()) {
423    trcVerbose("OS/400 < V6R1 - no large page support.");
424    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
425    goto query_multipage_support_end;
426  }
427
428  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
429  {
430    const int MAX_PAGE_SIZES = 4;
431    psize_t sizes[MAX_PAGE_SIZES];
432    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
433    if (num_psizes == -1) {
434      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
435      trcVerbose("disabling multipage support.");
436      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
437      goto query_multipage_support_end;
438    }
439    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
440    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
441    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
442    for (int i = 0; i < num_psizes; i ++) {
443      trcVerbose(" %s ", describe_pagesize(sizes[i]));
444    }
445
446    // Can we use 64K, 16M pages?
447    for (int i = 0; i < num_psizes; i ++) {
448      const size_t pagesize = sizes[i];
449      if (pagesize != 64*K && pagesize != 16*M) {
450        continue;
451      }
452      bool can_use = false;
453      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
454      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
455        IPC_CREAT | S_IRUSR | S_IWUSR);
456      guarantee0(shmid != -1); // Should always work.
457      // Try to set pagesize.
458      struct shmid_ds shm_buf = { 0 };
459      shm_buf.shm_pagesize = pagesize;
460      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
461        const int en = errno;
462        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
463        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
464          errno);
465      } else {
466        // Attach and double check pageisze.
467        void* p = ::shmat(shmid, NULL, 0);
468        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
469        guarantee0(p != (void*) -1); // Should always work.
470        const size_t real_pagesize = os::Aix::query_pagesize(p);
471        if (real_pagesize != pagesize) {
472          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
473        } else {
474          can_use = true;
475        }
476        ::shmdt(p);
477      }
478      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
479      if (pagesize == 64*K) {
480        g_multipage_support.can_use_64K_pages = can_use;
481      } else if (pagesize == 16*M) {
482        g_multipage_support.can_use_16M_pages = can_use;
483      }
484    }
485
486  } // end: check which pages can be used for shared memory
487
488query_multipage_support_end:
489
490  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
491      describe_pagesize(g_multipage_support.pagesize));
492  trcVerbose("Data page size (C-Heap, bss, etc): %s",
493      describe_pagesize(g_multipage_support.datapsize));
494  trcVerbose("Text page size: %s",
495      describe_pagesize(g_multipage_support.textpsize));
496  trcVerbose("Thread stack page size (pthread): %s",
497      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
498  trcVerbose("Default shared memory page size: %s",
499      describe_pagesize(g_multipage_support.shmpsize));
500  trcVerbose("Can use 64K pages dynamically with shared meory: %s",
501      (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
502  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
503      (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
504  trcVerbose("Multipage error details: %d",
505      g_multipage_support.error);
506
507  // sanity checks
508  assert0(g_multipage_support.pagesize == 4*K);
509  assert0(g_multipage_support.datapsize == 4*K || g_multipage_support.datapsize == 64*K);
510  assert0(g_multipage_support.textpsize == 4*K || g_multipage_support.textpsize == 64*K);
511  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
512  assert0(g_multipage_support.shmpsize == 4*K || g_multipage_support.shmpsize == 64*K);
513
514}
515
// Derives and sets the core system properties from the libjvm location and
// the environment: dll dir, java home, boot path, library path (from
// LIBPATH), and extensions dirs.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // Strip one more component (/lib) only if the previous strrchr
    // succeeded, i.e. the path had the expected depth.
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // Get rid of /lib.
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  // buf was sized MAXPATHLEN + sizeof(EXTENSIONS_DIR), so this fits.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
581
582////////////////////////////////////////////////////////////////////////////////
583// breakpoint support
584
// Triggers the platform BREAKPOINT mechanism; called from VM code to stop
// in an attached debugger.
void os::breakpoint() {
  BREAKPOINT;
}
588
// Intentionally empty extern "C" function: gives debuggers a stable,
// unmangled symbol name to set a breakpoint on.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
592
593////////////////////////////////////////////////////////////////////////////////
594// signal support
595
// Tracks (debug builds only) whether signal_sets_init() has run.
debug_only(static bool signal_sets_initialized = false);
// Signal sets filled in by signal_sets_init() below.
static sigset_t unblocked_sigs, vm_sigs;
598
599bool os::Aix::is_sig_ignored(int sig) {
600  struct sigaction oact;
601  sigaction(sig, (struct sigaction*)NULL, &oact);
602  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
603    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
604  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
605    return true;
606  } else {
607    return false;
608  }
609}
610
// Builds the two global signal sets used by hotspot_sigmask():
// 'unblocked_sigs' (must be deliverable to every thread) and 'vm_sigs'
// (handled only by the VM thread). Must run while still single-threaded.
void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   // Only add the shutdown signals if the user's handlers do not ignore them.
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}
652
653// These are signals that are unblocked while a thread is running Java.
654// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  // Valid only after signal_sets_init() has run.
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
659
660// These are the signals that are blocked while a (non-VM) thread is
661// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  // Valid only after signal_sets_init() has run.
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
666
// Applies the standard hotspot signal mask to the given thread: saves the
// caller's mask in the OSThread, unblocks the always-needed signals, and
// blocks/unblocks the VM-thread-only signals depending on the thread kind.
void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  // Every thread must be able to receive the error signals (see signal_sets_init).
  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
688
// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case (it is zeroed on entry, so fields
// read anyway will be 0, not garbage).
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    // We requested exactly one record, so the return code must be 1.
    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    // Convert from 4K-page counts to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
756
757//////////////////////////////////////////////////////////////////////////////
758// create new thread
759
// Thread start routine for all newly created threads
// (passed to pthread_create from os::create_thread). Sets up the OSThread
// (ids, signal mask, FPU state), runs the Thread, and cleans up TLS.
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    thread->set_stack_base(os::current_stack_base());
    thread->set_stack_size(os::current_stack_size());
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  // (counter is incremented without synchronization; the exact value does
  // not matter here, only that it varies between threads.)

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}
830
// Create a native (pthread) thread for the given VM thread object.
// The pthread is created detached and in suspended state
// (PTHREAD_CREATE_SUSPENDED_NP); it is woken up later by os::pd_start_thread().
// Returns false if OSThread allocation or pthread_create fails; in that case
// all partially-allocated state is cleaned up and the caller keeps ownership
// of 'thread'.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t req_stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object.
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread state.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // Init thread attributes.
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  }

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // Calculate stack size if it's not specified by caller.
  size_t stack_size = os::Posix::get_initial_stack_size(thr_type, req_stack_size);

  // On Aix, pthread_attr_setstacksize fails with huge values and leaves the
  // thread size in attr unchanged. If this is the minimal stack size as set
  // by pthread_attr_init this leads to crashes after thread creation. E.g. the
  // guard pages might not fit on the tiny stack created.
  int ret = pthread_attr_setstacksize(&attr, stack_size);
  if (ret != 0) {
    log_warning(os, thread)("The thread stack size specified is invalid: " SIZE_FORMAT "k",
                            stack_size / K);
  }

  // Configure libc guard page.
  ret = pthread_attr_setguardsize(&attr, os::Aix::default_guard_size(thr_type));

  pthread_t tid = 0;
  // Only attempt the create if the guard-size setup succeeded.
  if (ret == 0) {
    ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);
  }

  if (ret == 0) {
    char buf[64];
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  } else {
    char buf[64];
    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  }

  // Attributes are no longer needed once the thread exists (or failed).
  pthread_attr_destroy(&attr);

  if (ret != 0) {
    // Need to clean up stuff we've allocated so far.
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}
909
910/////////////////////////////////////////////////////////////////////////////
911// attach existing thread
912
// bootstrap the main thread
// Must be called on the primordial thread itself (asserted); delegates to
// create_attached_thread to wire up OSThread state for it.
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
918
// Register the calling, already-running native thread with the VM:
// allocates its OSThread, records pthread and kernel thread ids, sets up
// the FPU state and signal mask. Returns false only if the OSThread
// allocation fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Query both ids of the calling thread.
  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  // Record the NUMA locality group, if available.
  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return true;
}
964
965void os::pd_start_thread(Thread* thread) {
966  int status = pthread_continue_np(thread->osthread()->pthread_id());
967  assert(status == 0, "thr_continue failed");
968}
969
970// Free OS resources related to the OSThread
971void os::free_thread(OSThread* osthread) {
972  assert(osthread != NULL, "osthread not set");
973
974  // We are told to free resources of the argument thread,
975  // but we can only really operate on the current thread.
976  assert(Thread::current()->osthread() == osthread,
977         "os::free_thread but not current thread");
978
979  // Restore caller's signal mask
980  sigset_t sigmask = osthread->caller_sigmask();
981  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
982
983  delete osthread;
984}
985
986////////////////////////////////////////////////////////////////////////////////
987// time support
988
989// Time since start-up in seconds to a fine granularity.
990// Used by VMSelfDestructTimer and the MemProfiler.
991double os::elapsedTime() {
992  return (double)(os::elapsed_counter()) * 0.000001;
993}
994
995jlong os::elapsed_counter() {
996  timeval time;
997  int status = gettimeofday(&time, NULL);
998  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
999}
1000
// The elapsed counter (see os::elapsed_counter) ticks in microseconds,
// hence 10^6 ticks per second.
jlong os::elapsed_frequency() {
  return (1000 * 1000);
}
1004
// Virtual (per-thread CPU) time support: available via os::elapsedVTime
// below, but not dynamically switchable, hence the fixed answers here.
bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
1008
1009double os::elapsedVTime() {
1010  struct rusage usage;
1011  int retval = getrusage(RUSAGE_THREAD, &usage);
1012  if (retval == 0) {
1013    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1014  } else {
1015    // better than nothing, but not much
1016    return elapsedTime();
1017  }
1018}
1019
1020jlong os::javaTimeMillis() {
1021  timeval time;
1022  int status = gettimeofday(&time, NULL);
1023  assert(status != -1, "aix error at gettimeofday()");
1024  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1025}
1026
1027void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1028  timeval time;
1029  int status = gettimeofday(&time, NULL);
1030  assert(status != -1, "aix error at gettimeofday()");
1031  seconds = jlong(time.tv_sec);
1032  nanos = jlong(time.tv_usec) * 1000;
1033}
1034
// Nanosecond time stamp. On PASE it is derived from gettimeofday
// (microsecond granularity, scaled up); on AIX from mread_real_time,
// which yields a monotonically increasing time (see comment below).
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1063
1064void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
1065  info_ptr->max_value = ALL_64_BITS;
1066  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
1067  info_ptr->may_skip_backward = false;
1068  info_ptr->may_skip_forward = false;
1069  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
1070}
1071
1072// Return the real, user, and system times in seconds from an
1073// arbitrary fixed point in the past.
1074bool os::getTimesSecs(double* process_real_time,
1075                      double* process_user_time,
1076                      double* process_system_time) {
1077  struct tms ticks;
1078  clock_t real_ticks = times(&ticks);
1079
1080  if (real_ticks == (clock_t) (-1)) {
1081    return false;
1082  } else {
1083    double ticks_per_second = (double) clock_tics_per_sec;
1084    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1085    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1086    *process_real_time = ((double) real_ticks) / ticks_per_second;
1087
1088    return true;
1089  }
1090}
1091
1092char * os::local_time_string(char *buf, size_t buflen) {
1093  struct tm t;
1094  time_t long_time;
1095  time(&long_time);
1096  localtime_r(&long_time, &t);
1097  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1098               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1099               t.tm_hour, t.tm_min, t.tm_sec);
1100  return buf;
1101}
1102
// Thread-safe localtime: delegates to localtime_r with a caller-provided
// result buffer.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
1106
1107////////////////////////////////////////////////////////////////////////////////
1108// runtime exit support
1109
1110// Note: os::shutdown() might be called very early during initialization, or
1111// called from signal handler. Before adding something to os::shutdown(), make
1112// sure it is async-safe and can handle partially initialized VM.
// Best-effort VM shutdown cleanup: perf memory, attach listener, output
// streams, then the user-registered abort hook (see note above regarding
// async-safety).
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1130
1131// Note: os::abort() might be called very early during initialization, or
1132// called from signal handler. Before adding something to os::abort(), make
1133// sure it is async-safe and can handle partially initialized VM.
// Shut down and terminate the process, optionally producing a core dump
// via ::abort(). siginfo/context are unused on AIX.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    // In debug builds announce the dump on the default output stream first.
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  // No core requested: plain exit with failure status.
  ::exit(1);
}
1150
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // ::abort raises SIGABRT; no VM shutdown code runs.
  ::abort();
}
1155
1156// This method is a copy of JDK's sysGetLastErrorString
1157// from src/solaris/hpi/src/system_md.c
1158
1159size_t os::lasterror(char *buf, size_t len) {
1160  if (errno == 0) return 0;
1161
1162  const char *s = os::strerror(errno);
1163  size_t n = ::strlen(s);
1164  if (n >= len) {
1165    n = len - 1;
1166  }
1167  ::strncpy(buf, s, n);
1168  buf[n] = '\0';
1169  return n;
1170}
1171
// The VM-visible thread id is the pthread id (matches what
// OSThread::set_thread_id stores).
intx os::current_thread_id() {
  return (intx)pthread_self();
}

// Plain OS process id.
int os::current_process_id() {
  return getpid();
}
1179
1180// DLL functions
1181
// Shared libraries on AIX carry the .so suffix.
const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1187
1188// Check if addr is inside libjvm.so.
1189bool os::address_is_in_vm(address addr) {
1190
1191  // Input could be a real pc or a function pointer literal. The latter
1192  // would be a function descriptor residing in the data segment of a module.
1193  loaded_module_t lm;
1194  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1195    return lm.is_in_vm;
1196  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1197    return lm.is_in_vm;
1198  } else {
1199    return false;
1200  }
1201
1202}
1203
1204// Resolve an AIX function descriptor literal to a code pointer.
1205// If the input is a valid code pointer to a text segment of a loaded module,
1206//   it is returned unchanged.
1207// If the input is a valid AIX function descriptor, it is resolved to the
1208//   code entry point.
1209// If the input is neither a valid function descriptor nor a valid code pointer,
1210//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
    // It is a real code pointer.
    return p;
  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
    // Pointer to data segment, potential function descriptor.
    // Reinterpret the words at p as a FunctionDescriptor and test whether
    // its entry field points into some module's text segment.
    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
      // It is a function descriptor.
      return code_entry;
    }
  }

  // Neither a code pointer nor a resolvable function descriptor.
  return NULL;
}
1227
1228bool os::dll_address_to_function_name(address addr, char *buf,
1229                                      int buflen, int *offset,
1230                                      bool demangle) {
1231  if (offset) {
1232    *offset = -1;
1233  }
1234  // Buf is not optional, but offset is optional.
1235  assert(buf != NULL, "sanity check");
1236  buf[0] = '\0';
1237
1238  // Resolve function ptr literals first.
1239  addr = resolve_function_descriptor_to_code_pointer(addr);
1240  if (!addr) {
1241    return false;
1242  }
1243
1244  return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1245}
1246
1247bool os::dll_address_to_library_name(address addr, char* buf,
1248                                     int buflen, int* offset) {
1249  if (offset) {
1250    *offset = -1;
1251  }
1252  // Buf is not optional, but offset is optional.
1253  assert(buf != NULL, "sanity check");
1254  buf[0] = '\0';
1255
1256  // Resolve function ptr literals first.
1257  addr = resolve_function_descriptor_to_code_pointer(addr);
1258  if (!addr) {
1259    return false;
1260  }
1261
1262  return AixSymbols::get_module_name(addr, buf, buflen);
1263}
1264
1265// Loads .dll/.so and in case of error it checks if .dll/.so was built
1266// for the same architecture as Hotspot is running on.
1267void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1268
1269  if (ebuf && ebuflen > 0) {
1270    ebuf[0] = '\0';
1271    ebuf[ebuflen - 1] = '\0';
1272  }
1273
1274  if (!filename || strlen(filename) == 0) {
1275    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1276    return NULL;
1277  }
1278
1279  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1280  void * result= ::dlopen(filename, RTLD_LAZY);
1281  if (result != NULL) {
1282    // Reload dll cache. Don't do this in signal handling.
1283    LoadedLibraries::reload();
1284    return result;
1285  } else {
1286    // error analysis when dlopen fails
1287    const char* const error_report = ::dlerror();
1288    if (error_report && ebuf && ebuflen > 0) {
1289      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1290               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1291    }
1292  }
1293  return NULL;
1294}
1295
1296void* os::dll_lookup(void* handle, const char* name) {
1297  void* res = dlsym(handle, name);
1298  return res;
1299}
1300
// Handle for the main program itself (dlopen with a NULL path).
void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}
1304
// Print the list of loaded modules (used in error reports).
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1309
1310void os::get_summary_os_info(char* buf, size_t buflen) {
1311  // There might be something more readable than uname results for AIX.
1312  struct utsname name;
1313  uname(&name);
1314  snprintf(buf, buflen, "%s %s", name.release, name.version);
1315}
1316
1317void os::print_os_info(outputStream* st) {
1318  st->print("OS:");
1319
1320  st->print("uname:");
1321  struct utsname name;
1322  uname(&name);
1323  st->print(name.sysname); st->print(" ");
1324  st->print(name.nodename); st->print(" ");
1325  st->print(name.release); st->print(" ");
1326  st->print(name.version); st->print(" ");
1327  st->print(name.machine);
1328  st->cr();
1329
1330  uint32_t ver = os::Aix::os_version();
1331  st->print_cr("AIX kernel version %u.%u.%u.%u",
1332               (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1333
1334  os::Posix::print_rlimit_info(st);
1335
1336  // load average
1337  st->print("load average:");
1338  double loadavg[3] = {-1.L, -1.L, -1.L};
1339  os::loadavg(loadavg, 3);
1340  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1341  st->cr();
1342
1343  // print wpar info
1344  libperfstat::wparinfo_t wi;
1345  if (libperfstat::get_wparinfo(&wi)) {
1346    st->print_cr("wpar info");
1347    st->print_cr("name: %s", wi.name);
1348    st->print_cr("id:   %d", wi.wpar_id);
1349    st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1350  }
1351
1352  // print partition info
1353  libperfstat::partitioninfo_t pi;
1354  if (libperfstat::get_partitioninfo(&pi)) {
1355    st->print_cr("partition info");
1356    st->print_cr(" name: %s", pi.name);
1357  }
1358
1359}
1360
1361void os::print_memory_info(outputStream* st) {
1362
1363  st->print_cr("Memory:");
1364
1365  st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1366    describe_pagesize(g_multipage_support.pagesize));
1367  st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1368    describe_pagesize(g_multipage_support.datapsize));
1369  st->print_cr("  Text page size:                         %s",
1370    describe_pagesize(g_multipage_support.textpsize));
1371  st->print_cr("  Thread stack page size (pthread):       %s",
1372    describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1373  st->print_cr("  Default shared memory page size:        %s",
1374    describe_pagesize(g_multipage_support.shmpsize));
1375  st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1376    (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1377  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1378    (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1379  st->print_cr("  Multipage error: %d",
1380    g_multipage_support.error);
1381  st->cr();
1382  st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1383
1384  // print out LDR_CNTRL because it affects the default page sizes
1385  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1386  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1387
1388  // Print out EXTSHM because it is an unsupported setting.
1389  const char* const extshm = ::getenv("EXTSHM");
1390  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1391  if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1392    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1393  }
1394
1395  // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1396  const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1397  st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1398      aixthread_guardpages ? aixthread_guardpages : "<unset>");
1399
1400  os::Aix::meminfo_t mi;
1401  if (os::Aix::get_meminfo(&mi)) {
1402    char buffer[256];
1403    if (os::Aix::on_aix()) {
1404      st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1405      st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1406      st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1407      st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1408    } else {
1409      // PASE - Numbers are result of QWCRSSTS; they mean:
1410      // real_total: Sum of all system pools
1411      // real_free: always 0
1412      // pgsp_total: we take the size of the system ASP
1413      // pgsp_free: size of system ASP times percentage of system ASP unused
1414      st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1415      st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1416      st->print_cr("%% system asp used : " SIZE_FORMAT,
1417        mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1418    }
1419    st->print_raw(buffer);
1420  }
1421  st->cr();
1422
1423  // Print segments allocated with os::reserve_memory.
1424  st->print_cr("internal virtual memory regions used by vm:");
1425  vmembk_print_on(st);
1426}
1427
1428// Get a string for the cpuinfo that is a summary of the cpu type
1429void os::get_summary_cpu_info(char* buf, size_t buflen) {
1430  // This looks good
1431  libperfstat::cpuinfo_t ci;
1432  if (libperfstat::get_cpuinfo(&ci)) {
1433    strncpy(buf, ci.version, buflen);
1434  } else {
1435    strncpy(buf, "AIX", buflen);
1436  }
1437}
1438
// Platform-dependent extra CPU details; intentionally empty on AIX.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do beyond what os::print_cpu_info() does.
}
1442
1443static void print_signal_handler(outputStream* st, int sig,
1444                                 char* buf, size_t buflen);
1445
1446void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1447  st->print_cr("Signal Handlers:");
1448  print_signal_handler(st, SIGSEGV, buf, buflen);
1449  print_signal_handler(st, SIGBUS , buf, buflen);
1450  print_signal_handler(st, SIGFPE , buf, buflen);
1451  print_signal_handler(st, SIGPIPE, buf, buflen);
1452  print_signal_handler(st, SIGXFSZ, buf, buflen);
1453  print_signal_handler(st, SIGILL , buf, buflen);
1454  print_signal_handler(st, SR_signum, buf, buflen);
1455  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1456  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1457  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1458  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1459  print_signal_handler(st, SIGTRAP, buf, buflen);
1460  // We also want to know if someone else adds a SIGDANGER handler because
1461  // that will interfere with OOM killling.
1462  print_signal_handler(st, SIGDANGER, buf, buflen);
1463}
1464
// Cached result of os::jvm_path(); filled lazily on the first call.
static char saved_jvm_path[MAXPATHLEN] = {0};
1466
1467// Find the full path to the current module, libjvm.so.
// Writes the resolved libjvm.so path into buf (buflen must be at least
// MAXPATHLEN) and caches it in saved_jvm_path for subsequent calls.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate the shared object containing this very function.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Typical
    // value for buf is "<JAVA_HOME>/jre/lib/<vmtype>/libjvm.so".
    // If "/jre/lib/" appears at the right place in the string, then
    // assume we are installed in a JDK and we're done. Otherwise, check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    // Walk back at most 4 path components from the end of buf.
    const char *p = buf + strlen(buf) - 1;
    for (int count = 0; p > buf && count < 4; ++count) {
      for (--p; p > buf && *p != '/'; --p)
        /* empty */ ;
    }

    if (strncmp(p, "/jre/lib/", 9) != 0) {
      // Look for JAVA_HOME in the environment.
      char* java_home_var = ::getenv("JAVA_HOME");
      if (java_home_var != NULL && java_home_var[0] != 0) {
        char* jrelib_p;
        int len;

        // Check the current module name "libjvm.so".
        p = strrchr(buf, '/');
        if (p == NULL) {
          return;
        }
        assert(strstr(p, "/libjvm") == p, "invalid library name");

        rp = os::Posix::realpath(java_home_var, buf, buflen);
        if (rp == NULL) {
          return;
        }

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        len = strlen(buf);
        assert(len < buflen, "Ran out of buffer room");
        jrelib_p = buf + len;
        snprintf(jrelib_p, buflen-len, "/jre/lib");
        if (0 != access(buf, F_OK)) {
          snprintf(jrelib_p, buflen-len, "/lib");
        }

        if (0 == access(buf, F_OK)) {
          // Use current module name "libjvm.so"
          len = strlen(buf);
          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
        } else {
          // Go back to path of .so
          rp = os::Posix::realpath((char *)dlinfo.dli_fname, buf, buflen);
          if (rp == NULL) {
            return;
          }
        }
      }
    }
  }

  // Cache the result for future calls (always NUL-terminated).
  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1548
// JNI native symbol decoration: AIX uses plain C names, so neither a
// prefix nor a suffix is emitted.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1556
1557////////////////////////////////////////////////////////////////////////////////
1558// sun.misc.Signal support
1559
// Number of SIGINTs seen; reset to 0 by check_pending_signals().
static volatile jint sigint_count = 0;

// Handler registered for signals forwarded to sun.misc.Signal
// (see os::signal / os::user_handler).
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && VMError::is_error_reported()) {
    os::die();
  }

  // Record the signal and wake up a waiter in check_pending_signals().
  os::signal_notify(sig);
}
1578
// Address of the UserHandler above, for registration via os::signal.
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
1582
// Handler signatures as used by sigaction: the plain (sa_handler) and the
// extended (sa_sigaction) form.
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
1587
// Install 'handler' for 'signal_number' (sun.misc.Signal support).
// Returns the previously installed handler, or (void*)-1 if sigaction
// failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1619
// Send the given signal to the current process.
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
1623
1624//
1625// The following code is moved from os.cpp for making this
1626// code platform specific, which it is by its very nature.
1627//
1628
// Will be modified when max signal is changed to be dynamic
// Number of distinct signal slots (indexes into pending_signals below).
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };
1636
1637// Wrapper functions for: sem_init(), sem_post(), sem_wait()
1638// On AIX, we use sem_init(), sem_post(), sem_wait()
1639// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
1640// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
1641// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
1642// on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;             // used on AIX (sem_init/sem_post/sem_wait)
static msemaphore* p_sig_msem = 0; // used on PASE; allocated in mapped memory (see local_sem_init)
1645
1646static void local_sem_init() {
1647  if (os::Aix::on_aix()) {
1648    int rc = ::sem_init(&sig_sem, 0, 0);
1649    guarantee(rc != -1, "sem_init failed");
1650  } else {
1651    // Memory semaphores must live in shared mem.
1652    guarantee0(p_sig_msem == NULL);
1653    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1654    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1655    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1656  }
1657}
1658
1659static void local_sem_post() {
1660  static bool warn_only_once = false;
1661  if (os::Aix::on_aix()) {
1662    int rc = ::sem_post(&sig_sem);
1663    if (rc == -1 && !warn_only_once) {
1664      trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
1665      warn_only_once = true;
1666    }
1667  } else {
1668    guarantee0(p_sig_msem != NULL);
1669    int rc = ::msem_unlock(p_sig_msem, 0);
1670    if (rc == -1 && !warn_only_once) {
1671      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
1672      warn_only_once = true;
1673    }
1674  }
1675}
1676
1677static void local_sem_wait() {
1678  static bool warn_only_once = false;
1679  if (os::Aix::on_aix()) {
1680    int rc = ::sem_wait(&sig_sem);
1681    if (rc == -1 && !warn_only_once) {
1682      trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
1683      warn_only_once = true;
1684    }
1685  } else {
1686    guarantee0(p_sig_msem != NULL); // must init before use
1687    int rc = ::msem_lock(p_sig_msem, 0);
1688    if (rc == -1 && !warn_only_once) {
1689      trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
1690      warn_only_once = true;
1691    }
1692  }
1693}
1694
// Platform-dependent part of the sun.misc.Signal machinery setup.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}
1702
// Record one occurrence of 'sig' and release a waiter in
// check_pending_signals(). Called from the UserHandler signal handler.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1707
// Return the number of a pending signal, decrementing its pending count.
// If nothing is pending: returns -1 when 'wait' is false, otherwise blocks
// on the signal semaphore, cooperating with the external suspend/resume
// protocol while blocked.
static int check_pending_signals(bool wait) {
  // Reset the SIGINT throttle used by UserHandler.
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS-decrement the slot; retry the scan if another thread raced us.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //

        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1747
// Non-blocking poll for a pending signal; returns -1 if none is pending.
int os::signal_lookup() {
  return check_pending_signals(false);
}
1751
// Block until a signal has been posted via os::signal_notify(), then
// return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1755
1756////////////////////////////////////////////////////////////////////////////////
1757// Virtual Memory
1758
1759// We need to keep small simple bookkeeping for os::reserve_memory and friends.
1760
1761#define VMEM_MAPPED  1
1762#define VMEM_SHMATED 2
1763
// Bookkeeping record for one reserved virtual memory range (see
// os::reserve_memory and friends). Records how the range was allocated
// (mmap vs. shmat), its real (page-aligned) size and its page size, so
// that commit/uncommit/release can pick the matching OS mechanism.
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;   // singly-linked list, head is vmem.first

  // Does address p fall inside [addr, addr + size)?
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // Is [p, p + s) fully contained in this block?
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1803
// Global list of all bookkeeping records, protected by its own
// critical section (cs).
static struct {
  vmembk_t* first;
  MiscUtils::CritSect cs;
} vmem;
1808
1809static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1810  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1811  assert0(p);
1812  if (p) {
1813    MiscUtils::AutoCritSect lck(&vmem.cs);
1814    p->addr = addr; p->size = size;
1815    p->pagesize = pagesize;
1816    p->type = type;
1817    p->next = vmem.first;
1818    vmem.first = p;
1819  }
1820}
1821
1822static vmembk_t* vmembk_find(char* addr) {
1823  MiscUtils::AutoCritSect lck(&vmem.cs);
1824  for (vmembk_t* p = vmem.first; p; p = p->next) {
1825    if (p->addr <= addr && (p->addr + p->size) > addr) {
1826      return p;
1827    }
1828  }
1829  return NULL;
1830}
1831
1832static void vmembk_remove(vmembk_t* p0) {
1833  MiscUtils::AutoCritSect lck(&vmem.cs);
1834  assert0(p0);
1835  assert0(vmem.first); // List should not be empty.
1836  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1837    if (*pp == p0) {
1838      *pp = p0->next;
1839      ::free(p0);
1840      return;
1841    }
1842  }
1843  assert0(false); // Not found?
1844}
1845
1846static void vmembk_print_on(outputStream* os) {
1847  MiscUtils::AutoCritSect lck(&vmem.cs);
1848  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1849    vmi->print_on(os);
1850    os->cr();
1851  }
1852}
1853
1854// Reserve and attach a section of System V memory.
1855// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1856// address. Failing that, it will attach the memory anywhere.
1857// If <requested_addr> is NULL, function will attach the memory anywhere.
1858//
1859// <alignment_hint> is being ignored by this function. It is very probable however that the
1860// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1861// Should this be not enogh, we can put more work into it.
1862static char* reserve_shmated_memory (
1863  size_t bytes,
1864  char* requested_addr,
1865  size_t alignment_hint) {
1866
1867  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1868    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1869    bytes, requested_addr, alignment_hint);
1870
1871  // Either give me wish address or wish alignment but not both.
1872  assert0(!(requested_addr != NULL && alignment_hint != 0));
1873
1874  // We must prevent anyone from attaching too close to the
1875  // BRK because that may cause malloc OOM.
1876  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1877    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1878      "Will attach anywhere.", requested_addr);
1879    // Act like the OS refused to attach there.
1880    requested_addr = NULL;
1881  }
1882
1883  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1884  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1885  if (os::Aix::on_pase_V5R4_or_older()) {
1886    ShouldNotReachHere();
1887  }
1888
1889  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1890  const size_t size = align_up(bytes, 64*K);
1891
1892  // Reserve the shared segment.
1893  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1894  if (shmid == -1) {
1895    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1896    return NULL;
1897  }
1898
1899  // Important note:
1900  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1901  // We must right after attaching it remove it from the system. System V shm segments are global and
1902  // survive the process.
1903  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1904
1905  struct shmid_ds shmbuf;
1906  memset(&shmbuf, 0, sizeof(shmbuf));
1907  shmbuf.shm_pagesize = 64*K;
1908  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1909    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1910               size / (64*K), errno);
1911    // I want to know if this ever happens.
1912    assert(false, "failed to set page size for shmat");
1913  }
1914
1915  // Now attach the shared segment.
1916  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1917  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1918  // were not a segment boundary.
1919  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1920  const int errno_shmat = errno;
1921
1922  // (A) Right after shmat and before handing shmat errors delete the shm segment.
1923  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1924    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1925    assert(false, "failed to remove shared memory segment!");
1926  }
1927
1928  // Handle shmat error. If we failed to attach, just return.
1929  if (addr == (char*)-1) {
1930    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1931    return NULL;
1932  }
1933
1934  // Just for info: query the real page size. In case setting the page size did not
1935  // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
1936  const size_t real_pagesize = os::Aix::query_pagesize(addr);
1937  if (real_pagesize != shmbuf.shm_pagesize) {
1938    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
1939  }
1940
1941  if (addr) {
1942    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
1943      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
1944  } else {
1945    if (requested_addr != NULL) {
1946      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
1947    } else {
1948      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
1949    }
1950  }
1951
1952  // book-keeping
1953  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
1954  assert0(is_aligned_to(addr, os::vm_page_size()));
1955
1956  return addr;
1957}
1958
1959static bool release_shmated_memory(char* addr, size_t size) {
1960
1961  trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1962    addr, addr + size - 1);
1963
1964  bool rc = false;
1965
1966  // TODO: is there a way to verify shm size without doing bookkeeping?
1967  if (::shmdt(addr) != 0) {
1968    trcVerbose("error (%d).", errno);
1969  } else {
1970    trcVerbose("ok.");
1971    rc = true;
1972  }
1973  return rc;
1974}
1975
1976static bool uncommit_shmated_memory(char* addr, size_t size) {
1977  trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
1978    addr, addr + size - 1);
1979
1980  const bool rc = my_disclaim64(addr, size);
1981
1982  if (!rc) {
1983    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
1984    return false;
1985  }
1986  return true;
1987}
1988
1989////////////////////////////////  mmap-based routines /////////////////////////////////
1990
1991// Reserve memory via mmap.
1992// If <requested_addr> is given, an attempt is made to attach at the given address.
1993// Failing that, memory is allocated at any address.
1994// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
1995// allocate at an address aligned with the given alignment. Failing that, memory
1996// is aligned anywhere.
1997static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
1998  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
1999    "alignment_hint " UINTX_FORMAT "...",
2000    bytes, requested_addr, alignment_hint);
2001
2002  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2003  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2004    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2005    return NULL;
2006  }
2007
2008  // We must prevent anyone from attaching too close to the
2009  // BRK because that may cause malloc OOM.
2010  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2011    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2012      "Will attach anywhere.", requested_addr);
2013    // Act like the OS refused to attach there.
2014    requested_addr = NULL;
2015  }
2016
2017  // Specify one or the other but not both.
2018  assert0(!(requested_addr != NULL && alignment_hint > 0));
2019
2020  // In 64K mode, we claim the global page size (os::vm_page_size())
2021  // is 64K. This is one of the few points where that illusion may
2022  // break, because mmap() will always return memory aligned to 4K. So
2023  // we must ensure we only ever return memory aligned to 64k.
2024  if (alignment_hint) {
2025    alignment_hint = lcm(alignment_hint, os::vm_page_size());
2026  } else {
2027    alignment_hint = os::vm_page_size();
2028  }
2029
2030  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2031  const size_t size = align_up(bytes, os::vm_page_size());
2032
2033  // alignment: Allocate memory large enough to include an aligned range of the right size and
2034  // cut off the leading and trailing waste pages.
2035  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2036  const size_t extra_size = size + alignment_hint;
2037
2038  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2039  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2040  int flags = MAP_ANONYMOUS | MAP_SHARED;
2041
2042  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2043  // it means if wishaddress is given but MAP_FIXED is not set.
2044  //
2045  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2046  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2047  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2048  // get clobbered.
2049  if (requested_addr != NULL) {
2050    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2051      flags |= MAP_FIXED;
2052    }
2053  }
2054
2055  char* addr = (char*)::mmap(requested_addr, extra_size,
2056      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2057
2058  if (addr == MAP_FAILED) {
2059    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2060    return NULL;
2061  }
2062
2063  // Handle alignment.
2064  char* const addr_aligned = align_up(addr, alignment_hint);
2065  const size_t waste_pre = addr_aligned - addr;
2066  char* const addr_aligned_end = addr_aligned + size;
2067  const size_t waste_post = extra_size - waste_pre - size;
2068  if (waste_pre > 0) {
2069    ::munmap(addr, waste_pre);
2070  }
2071  if (waste_post > 0) {
2072    ::munmap(addr_aligned_end, waste_post);
2073  }
2074  addr = addr_aligned;
2075
2076  if (addr) {
2077    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2078      addr, addr + bytes, bytes);
2079  } else {
2080    if (requested_addr != NULL) {
2081      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2082    } else {
2083      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2084    }
2085  }
2086
2087  // bookkeeping
2088  vmembk_add(addr, size, 4*K, VMEM_MAPPED);
2089
2090  // Test alignment, see above.
2091  assert0(is_aligned_to(addr, os::vm_page_size()));
2092
2093  return addr;
2094}
2095
2096static bool release_mmaped_memory(char* addr, size_t size) {
2097  assert0(is_aligned_to(addr, os::vm_page_size()));
2098  assert0(is_aligned_to(size, os::vm_page_size()));
2099
2100  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2101    addr, addr + size - 1);
2102  bool rc = false;
2103
2104  if (::munmap(addr, size) != 0) {
2105    trcVerbose("failed (%d)\n", errno);
2106    rc = false;
2107  } else {
2108    trcVerbose("ok.");
2109    rc = true;
2110  }
2111
2112  return rc;
2113}
2114
2115static bool uncommit_mmaped_memory(char* addr, size_t size) {
2116
2117  assert0(is_aligned_to(addr, os::vm_page_size()));
2118  assert0(is_aligned_to(size, os::vm_page_size()));
2119
2120  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2121    addr, addr + size - 1);
2122  bool rc = false;
2123
2124  // Uncommit mmap memory with msync MS_INVALIDATE.
2125  if (::msync(addr, size, MS_INVALIDATE) != 0) {
2126    trcVerbose("failed (%d)\n", errno);
2127    rc = false;
2128  } else {
2129    trcVerbose("ok.");
2130    rc = true;
2131  }
2132
2133  return rc;
2134}
2135
// The global VM page size (4K or, in 64K mode, 64K). Valid only after
// os::init() has run.
int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2141
// Aix allocates memory by pages, so the allocation granularity equals
// the VM page size.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2147
#ifdef PRODUCT
// Emit a warning describing a failed commit_memory call. Only compiled
// in product builds; used via the PRODUCT_ONLY macro in
// pd_commit_memory_or_exit() below.
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          os::errno_name(err), err);
}
#endif
2156
// Commit [addr, addr + size) or abort the VM with an out-of-memory error
// carrying <mesg>. <exec> is passed through to pd_commit_memory.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
2166
2167bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2168
2169  assert(is_aligned_to(addr, os::vm_page_size()),
2170    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2171    p2i(addr), os::vm_page_size());
2172  assert(is_aligned_to(size, os::vm_page_size()),
2173    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2174    size, os::vm_page_size());
2175
2176  vmembk_t* const vmi = vmembk_find(addr);
2177  guarantee0(vmi);
2178  vmi->assert_is_valid_subrange(addr, size);
2179
2180  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2181
2182  if (UseExplicitCommit) {
2183    // AIX commits memory on touch. So, touch all pages to be committed.
2184    for (char* p = addr; p < (addr + size); p += 4*K) {
2185      *p = '\0';
2186    }
2187  }
2188
2189  return true;
2190}
2191
// Variant with alignment hint; the hint is irrelevant on AIX and ignored.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
2195
// Variant with alignment hint; delegates to the hint-less overload.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2202
2203bool os::pd_uncommit_memory(char* addr, size_t size) {
2204  assert(is_aligned_to(addr, os::vm_page_size()),
2205    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2206    p2i(addr), os::vm_page_size());
2207  assert(is_aligned_to(size, os::vm_page_size()),
2208    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2209    size, os::vm_page_size());
2210
2211  // Dynamically do different things for mmap/shmat.
2212  const vmembk_t* const vmi = vmembk_find(addr);
2213  guarantee0(vmi);
2214  vmi->assert_is_valid_subrange(addr, size);
2215
2216  if (vmi->type == VMEM_SHMATED) {
2217    return uncommit_shmated_memory(addr, size);
2218  } else {
2219    return uncommit_mmaped_memory(addr, size);
2220  }
2221}
2222
// Not used on AIX: stack pages need no explicit commit, so guard page
// creation is a no-op that must never be reached.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2228
// Not used on AIX; see pd_create_stack_guard_pages().
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2234
// No-op on AIX: large-page realignment hints are not supported.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2237
// No-op on AIX: the madvise-style free hint is not implemented.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2240
// NUMA is not supported on AIX in this port; no-op.
void os::numa_make_global(char *addr, size_t bytes) {
}
2243
// NUMA is not supported on AIX in this port; no-op.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
2246
// NUMA topology is treated as static (single group) on AIX.
bool os::numa_topology_changed() {
  return false;
}
2250
// Single NUMA group on AIX in this port.
size_t os::numa_get_groups_num() {
  return 1;
}
2254
// All threads report the single NUMA group 0.
int os::numa_get_group_id() {
  return 0;
}
2258
2259size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2260  if (size > 0) {
2261    ids[0] = 0;
2262    return 1;
2263  }
2264  return 0;
2265}
2266
// Page info queries are not supported on AIX; always reports failure.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
2270
// Page scanning is not supported on AIX; pretends the whole range was
// scanned by returning <end>.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2274
2275// Reserves and attaches a shared memory segment.
2276// Will assert if a wish address is given and could not be obtained.
2277char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2278
2279  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2280  // thereby clobbering old mappings at that place. That is probably
2281  // not intended, never used and almost certainly an error were it
2282  // ever be used this way (to try attaching at a specified address
2283  // without clobbering old mappings an alternate API exists,
2284  // os::attempt_reserve_memory_at()).
2285  // Instead of mimicking the dangerous coding of the other platforms, here I
2286  // just ignore the request address (release) or assert(debug).
2287  assert0(requested_addr == NULL);
2288
2289  // Always round to os::vm_page_size(), which may be larger than 4K.
2290  bytes = align_up(bytes, os::vm_page_size());
2291  const size_t alignment_hint0 =
2292    alignment_hint ? align_up(alignment_hint, os::vm_page_size()) : 0;
2293
2294  // In 4K mode always use mmap.
2295  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2296  if (os::vm_page_size() == 4*K) {
2297    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2298  } else {
2299    if (bytes >= Use64KPagesThreshold) {
2300      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2301    } else {
2302      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2303    }
2304  }
2305}
2306
// Release (or partially release) a reserved range, dispatching on how the
// range was reserved. Bookkeeping is dropped only when the whole block
// goes away.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_up(size, os::vm_page_size());
  addr = align_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (bust still page
    //   table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    // Note: a partial munmap drops the whole bookkeeping record anyway.
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2348
// mprotect() wrapper that, where possible, verifies the protection change
// actually took effect (AIX mprotect can report success without doing
// anything on shmatted memory when SPEC1170 mode is off).
static bool checked_mprotect(char* addr, size_t size, int prot) {

  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
  // not tell me if protection failed when trying to protect an un-protectable range.
  //
  // This means if the memory was allocated using shmget/shmat, protection wont work
  // but mprotect will still return 0:
  //
  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm

  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;

  if (!rc) {
    const char* const s_errno = os::errno_name(errno);
    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
    return false;
  }

  // mprotect success check
  //
  // Mprotect said it changed the protection but can I believe it?
  //
  // To be sure I need to check the protection afterwards. Try to
  // read from protected memory and check whether that causes a segfault.
  //
  if (!os::Aix::xpg_sus_mode()) {

    if (CanUseSafeFetch32()) {

      // Probe the first word twice with different defaults: only if both
      // probes return their default did the read fault, i.e. the page is
      // read protected.
      const bool read_protected =
        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;

      if (prot & PROT_READ) {
        rc = !read_protected;
      } else {
        rc = read_protected;
      }

      if (!rc) {
        if (os::Aix::on_pase()) {
          // There is an issue on older PASE systems where mprotect() will return success but the
          // memory will not be protected.
          // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
          // machines; we only see it rarely, when using mprotect() to protect the guard page of
          // a stack. It is an OS error.
          //
          // A valid strategy is just to try again. This usually works. :-/

          ::usleep(1000);
          if (::mprotect(addr, size, prot) == 0) {
            // NOTE(review): read_protected_2 is computed but never consulted;
            // the retry is trusted unconditionally (rc = true). Looks like the
            // re-verification was intended but left unfinished - confirm.
            const bool read_protected_2 =
              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
            rc = true;
          }
        }
      }
    }
  }

  assert(rc == true, "mprotect failed.");

  return rc;
}
2414
2415// Set protections specified
2416bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2417  unsigned int p = 0;
2418  switch (prot) {
2419  case MEM_PROT_NONE: p = PROT_NONE; break;
2420  case MEM_PROT_READ: p = PROT_READ; break;
2421  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2422  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2423  default:
2424    ShouldNotReachHere();
2425  }
2426  // is_committed is unused.
2427  return checked_mprotect(addr, size, p);
2428}
2429
// Make a range inaccessible (guard page semantics).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2433
// Restore full access (rwx) to a previously guarded range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2437
// Large page support

// Stays 0 on AIX: large ("huge") pages are not used by this port;
// 64K paging is handled by the normal reservation paths instead.
static size_t _large_page_size = 0;
2441
// Enable large page support if OS allows that.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}
2446
// Not supported on AIX; 64K pages come through os::reserve_memory().
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}
2454
// Not supported on AIX; see reserve_memory_special().
bool os::release_memory_special(char* base, size_t bytes) {
  // Detaching the SHM segment will also delete it, see reserve_memory_special().
  Unimplemented();
  return false;
}
2460
// Always 0 on AIX (large pages unsupported; see _large_page_size).
size_t os::large_page_size() {
  return _large_page_size;
}
2464
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2469
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2474
2475// Reserve memory at an arbitrary address, only if that area is
2476// available (and not reserved for something else).
2477char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2478  char* addr = NULL;
2479
2480  // Always round to os::vm_page_size(), which may be larger than 4K.
2481  bytes = align_up(bytes, os::vm_page_size());
2482
2483  // In 4K mode always use mmap.
2484  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2485  if (os::vm_page_size() == 4*K) {
2486    return reserve_mmaped_memory(bytes, requested_addr, 0);
2487  } else {
2488    if (bytes >= Use64KPagesThreshold) {
2489      return reserve_shmated_memory(bytes, requested_addr, 0);
2490    } else {
2491      return reserve_mmaped_memory(bytes, requested_addr, 0);
2492    }
2493  }
2494
2495  return addr;
2496}
2497
// Thin wrapper over the libc read(2) system call.
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  return ::read(fd, buf, nBytes);
}
2501
// Positioned read via pread(2); does not move the file offset.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  return ::pread(fd, buf, nBytes, offset);
}
2505
2506void os::naked_short_sleep(jlong ms) {
2507  struct timespec req;
2508
2509  assert(ms < 1000, "Un-interruptable sleep, short time use only");
2510  req.tv_sec = 0;
2511  if (ms > 0) {
2512    req.tv_nsec = (ms % 1000) * 1000000;
2513  }
2514  else {
2515    req.tv_nsec = 1;
2516  }
2517
2518  nanosleep(&req, NULL);
2519
2520  return;
2521}
2522
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    ::sleep(100);   // ... 100 seconds at a time
  }
}
2529
// Used to convert frequent JVM_Yield() to nops
bool os::dont_yield() {
  return DontYieldALot;
}
2534
// Give up the remainder of the time slice via sched_yield(2).
void os::naked_yield() {
  sched_yield();
}
2538
2539////////////////////////////////////////////////////////////////////////////////
2540// thread priority support
2541
2542// From AIX manpage to pthread_setschedparam
2543// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2544//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2545//
2546// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2547// range from 40 to 80, where 40 is the least favored priority and 80
2548// is the most favored."
2549//
2550// (Actually, I doubt this even has an impact on AIX, as we do kernel
2551// scheduling there; however, this still leaves iSeries.)
2552//
2553// We use the same values for AIX and PASE.
// Maps Java thread priorities (0..CriticalPriority) to the SCHED_OTHER
// priority band 40..80 documented for pthread_setschedparam (see the
// comment block above); values are shared between AIX and PASE.
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2573
2574OSReturn os::set_native_priority(Thread* thread, int newpri) {
2575  if (!UseThreadPriorities) return OS_OK;
2576  pthread_t thr = thread->osthread()->pthread_id();
2577  int policy = SCHED_OTHER;
2578  struct sched_param param;
2579  param.sched_priority = newpri;
2580  int ret = pthread_setschedparam(thr, policy, &param);
2581
2582  if (ret != 0) {
2583    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2584        (int)thr, newpri, ret, os::errno_name(ret));
2585  }
2586  return (ret == 0) ? OS_OK : OS_ERR;
2587}
2588
// Query the current OS priority of <thread> into *priority_ptr.
// With priorities disabled, reports the mapped NormPriority value.
OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  pthread_t thr = thread->osthread()->pthread_id();
  int policy = SCHED_OTHER;
  struct sched_param param;
  int ret = pthread_getschedparam(thr, &policy, &param);
  // NOTE(review): *priority_ptr is written from param even when
  // pthread_getschedparam failed, i.e. from an uninitialized struct;
  // callers should only trust it on OS_OK - confirm intended.
  *priority_ptr = param.sched_priority;

  return (ret == 0) ? OS_OK : OS_ERR;
}
2602
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Intentionally a no-op on AIX.
void os::hint_no_preempt() {}
2606
2607////////////////////////////////////////////////////////////////////////////////
2608// suspend/resume support
2609
2610//  The low-level signal-based suspend/resume support is a remnant from the
2611//  old VM-suspension that used to be for java-suspension, safepoints etc,
2612//  within hotspot. Currently used by JFR's OSThreadSampler
2613//
2614//  The remaining code is greatly simplified from the more general suspension
2615//  code that used to be used.
2616//
2617//  The protocol is quite simple:
2618//  - suspend:
2619//      - sends a signal to the target thread
2620//      - polls the suspend state of the osthread using a yield loop
2621//      - target thread signal handler (SR_handler) sets suspend state
2622//        and blocks in sigsuspend until continued
2623//  - resume:
2624//      - sets target osthread state to continue
2625//      - sends signal to end the sigsuspend loop in the SR_handler
2626//
2627//  Note that the SR_lock plays no role in this suspend/resume protocol,
2628//  but is checked for NULL in SR_handler as a thread termination indicator.
2629//  The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
2630//
2631//  Note that resume_clear_context() and suspend_save_context() are needed
2632//  by SR_handler(), so that fetch_frame_from_ucontext() works,
2633//  which in part is used by:
2634//    - Forte Analyzer: AsyncGetCallTrace()
2635//    - StackBanging: get_frame_at_stack_banging_point()
2636
// Drop the signal context saved at suspension time, so that stale
// suspend-time state is not visible after the thread resumes.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);
}
2641
// Record the signal context captured at suspension time so that
// fetch_frame_from_ucontext() can inspect the suspended thread's frame
// (used by AsyncGetCallTrace and stack-banging analysis, see above).
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  osthread->set_ucontext(context);
  osthread->set_siginfo(siginfo);
}
2646
2647//
2648// Handler function invoked when a thread's execution is suspended or
2649// resumed. We have to be careful that only async-safe functions are
2650// called here (Note: most pthread functions are not async safe and
2651// should be avoided.)
2652//
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2658//
2659// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2660//
// Signal handler for SR_signum; runs on the target thread itself and
// implements the suspended side of the protocol described above. Only
// async-signal-safe operations may be used here.
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current_or_null_safe();
  assert(thread != NULL, "Missing current thread in SR_handler");

  // On some systems we have seen signal delivery get "stuck" until the signal
  // mask is changed as part of thread termination. Check that the current thread
  // has not already terminated (via SR_lock()) - else the following assertion
  // will fail because the thread is no longer a JavaThread as the ~JavaThread
  // destructor has completed.

  if (thread->SR_lock() == NULL) {
    return;
  }

  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  OSThread* osthread = thread->osthread();

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        // sigsuspend() atomically unblocks SR_signum and waits; a wakeup
        // notification re-enters this handler and flips the state.
        sigsuspend(&suspend_set);

        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2723
// One-time setup of the suspend/resume signal: pick SR_signum (optionally
// overridden via _JAVA_SR_SIGNUM), install SR_handler for it and remember
// the installed sa_flags. Returns 0 on success, -1 if sigaction fails.
static int SR_initialize() {
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): SA_SIGINFO is set but the handler is stored through
  // sa_handler with a cast - presumably sa_handler/sa_sigaction overlay
  // each other on AIX; confirm against the AIX <signal.h> definition.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  // Initialize sa_mask from the current thread mask (which blocks SR_signum).
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2760
// Counterpart to SR_initialize; currently nothing to tear down.
static int SR_finalize() {
  return 0;
}
2764
// Deliver SR_signum to the target thread, either to start a suspension or
// to end its sigsuspend() loop. Returns the pthread_kill status (0 = ok).
static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}
2770
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume.
// These bound the outer/inner spin loops in do_suspend()/do_resume().
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
2776
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
//
// Drives the suspend side of the protocol: flips the state to
// SUSPEND_REQUEST, signals the target via sr_notify(), then spin-waits
// (with yields) until SR_handler reports SUSPENDED, cancelling on
// notify failure or timeout.
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2829
// Drives the resume side of the protocol: flips the state to
// WAKEUP_REQUEST and repeatedly signals the target (its SR_handler is
// parked in sigsuspend) until it reports RUNNING again.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2853
2854///////////////////////////////////////////////////////////////////////////////////
2855// signal handling (except suspend/resume)
2856
2857// This routine may be used by user applications as a "hook" to catch signals.
2858// The user-defined signal handler must pass unrecognized signals to this
2859// routine, and if it returns true (non-zero), then the signal handler must
// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine and kill the process.
2863//
2864// If this routine returns false, it is OK to call it again. This allows
2865// the user-defined signal handler to perform checks either before or after
2866// the VM performs its own checks. Naturally, the user code would be making
2867// a serious error if it tried to handle an exception (such as a null check
2868// or breakpoint) that the VM was generating for its own correct operation.
2869//
2870// This routine may recognize any of the following kinds of signals:
2871//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2872// It should be consulted by handlers for any of those signals.
2873//
2874// The caller of this routine must pass in the three arguments supplied
2875// to the function referred to in the "sa_sigaction" (not the "sa_handler")
2876// field of the structure passed to sigaction(). This routine assumes that
2877// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2878//
2879// Note that the VM will print warnings if it detects conflicting signal
2880// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2881//
2882extern "C" JNIEXPORT int
2883JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2884
2885// Set thread signal mask (for some reason on AIX sigthreadmask() seems
2886// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same.)
// Change (or query) the calling thread's signal mask via pthread_sigmask.
// 'how' is SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK; 'set' may be NULL to only
// query; the previous mask is returned through 'oset' when non-NULL.
// Returns true on success, false on failure.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly for the error case:
  // pthread_sigmask returns the error number directly, while sigthreadmask
  // returns -1 and sets the global errno (so pthread_sigmask is the more
  // threadsafe choice for error handling). But success is always 0.
  return rc == 0;
}
2896
2897// Function to unblock all signals which are, according
2898// to POSIX, typical program error signals. If they happen while being blocked,
2899// they typically will bring down the process immediately.
2900bool unblock_program_error_signals() {
2901  sigset_t set;
2902  ::sigemptyset(&set);
2903  ::sigaddset(&set, SIGILL);
2904  ::sigaddset(&set, SIGBUS);
2905  ::sigaddset(&set, SIGFPE);
2906  ::sigaddset(&set, SIGSEGV);
2907  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2908}
2909
// Renamed from 'signalHandler' to avoid collision with other shared libs.
// Common entry point installed for the signals the VM handles; unblocks
// the program-error signals and delegates to JVM_handle_aix_signal().
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_aix_signal(sig, info, uc, true);
  errno = orig_errno;
}
2923
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// Pre-existing handlers saved by save_preinstalled_handler(), indexed by
// signal number; 'sigs' records which entries of 'sigact' are valid.
struct sigaction sigact[NSIG];
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
// JVM_get_signal_action entry point resolved from libjsig (if loaded).
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
2934
2935struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2936  struct sigaction *actp = NULL;
2937
2938  if (libjsig_is_loaded) {
2939    // Retrieve the old signal handler from libjsig
2940    actp = (*get_signal_action)(sig);
2941  }
2942  if (actp == NULL) {
2943    // Retrieve the preinstalled signal handler from jvm
2944    actp = get_preinstalled_handler(sig);
2945  }
2946
2947  return actp;
2948}
2949
// Invoke the pre-existing handler 'actp' for 'sig', honoring its
// SA_NODEFER/SA_RESETHAND/SA_SIGINFO flags and its sa_mask. Returns true
// if the signal is considered handled (including SIG_IGN), false for
// SIG_DFL so the VM can treat it as an unexpected exception instead.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // SA_RESETHAND: the handler is one-shot, reset it before calling.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
2994
2995bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
2996  bool chained = false;
2997  // signal-chaining
2998  if (UseSignalChaining) {
2999    struct sigaction *actp = get_chained_signal_action(sig);
3000    if (actp != NULL) {
3001      chained = call_chained_handler(actp, sig, siginfo, context);
3002    }
3003  }
3004  return chained;
3005}
3006
// Default OS-level guard page size for a new thread of the given type.
size_t os::Aix::default_guard_size(os::ThreadType thr_type) {
  // Creating guard page is very expensive. Java thread has HotSpot
  // guard pages, only enable libc guard page for non-Java threads.
  // (Remember: compiler thread is a Java thread, too!)
  //
  // Aix can have different page sizes for stack (4K) and heap (64K).
  // As Hotspot knows only one page size, we assume the stack has
  // the same page size as the heap. Returning page_size() here can
  // cause 16 guard pages which we want to avoid.  Thus we return 4K
  // which will be rounded to the real page size by the OS.
  return ((thr_type == java_thread || thr_type == compiler_thread) ? 0 : 4 * K);
}
3019
3020struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3021  if (sigismember(&sigs, sig)) {
3022    return &sigact[sig];
3023  }
3024  return NULL;
3025}
3026
// Remember the third-party handler that was installed for 'sig' before
// the VM replaced it, so chained_handler() can invoke it later.
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}
3032
// for diagnostic
// Per-signal record of the sa_flags the VM installed; compared against
// the live sigaction state by check_signal_handler()/print_signal_handler().
int sigflags[NSIG];
3035
// Return the sa_flags the VM recorded for 'sig' (0 if never set).
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}
3040
// Record the sa_flags the VM installed for 'sig'. The range is asserted
// in debug builds and re-checked defensively so product builds never
// write out of bounds.
void os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  if (sig > 0 && sig < NSIG) {
    sigflags[sig] = flags;
  }
}
3047
// Install the VM's handler for 'sig' (set_installed == true) or reset it
// to SIG_DFL (false), respecting AllowUserSignalHandlers and saving any
// pre-existing third-party handler for chaining.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read what we replaced to detect a racing installation.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3094
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
// Idempotent; also detects libjsig (signal chaining) and brackets the
// installation with its begin/end callbacks when present.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      // The presence of JVM_begin_signal_setting implies libjsig is loaded.
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3151
3152static const char* get_signal_handler_name(address handler,
3153                                           char* buf, int buflen) {
3154  int offset;
3155  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3156  if (found) {
3157    // skip directory names
3158    const char *p1, *p2;
3159    p1 = buf;
3160    size_t len = strlen(os::file_separator());
3161    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3162    // The way os::dll_address_to_library_name is implemented on Aix
3163    // right now, it always returns -1 for the offset which is not
3164    // terribly informative.
3165    // Will fix that. For now, omit the offset.
3166    jio_snprintf(buf, buflen, "%s", p1);
3167  } else {
3168    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3169  }
3170  return buf;
3171}
3172
// Print the currently installed handler for 'sig' (name or SIG_DFL/SIG_IGN),
// its sa_mask and sa_flags, and warn when the flags of a VM-owned handler
// have been changed behind our back. 'buf' is scratch space of 'buflen'.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3219
// Check 'sig' only while it is still unreported; check_signal_handler()
// adds it to check_signal_done once a mismatch has been flagged.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3223
3224// This method is a periodic task to check for misbehaving JNI applications
3225// under CheckJNI, we can add any periodic checks here
3226
3227void os::run_periodic_checks() {
3228
3229  if (check_signals == false) return;
3230
3231  // SEGV and BUS if overridden could potentially prevent
3232  // generation of hs*.log in the event of a crash, debugging
3233  // such a case can be very challenging, so we absolutely
3234  // check the following for a good measure:
3235  DO_SIGNAL_CHECK(SIGSEGV);
3236  DO_SIGNAL_CHECK(SIGILL);
3237  DO_SIGNAL_CHECK(SIGFPE);
3238  DO_SIGNAL_CHECK(SIGBUS);
3239  DO_SIGNAL_CHECK(SIGPIPE);
3240  DO_SIGNAL_CHECK(SIGXFSZ);
3241  if (UseSIGTRAP) {
3242    DO_SIGNAL_CHECK(SIGTRAP);
3243  }
3244
3245  // ReduceSignalUsage allows the user to override these handlers
3246  // see comments at the very top and jvm_solaris.h
3247  if (!ReduceSignalUsage) {
3248    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3249    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3250    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3251    DO_SIGNAL_CHECK(BREAK_SIGNAL);
3252  }
3253
3254  DO_SIGNAL_CHECK(SR_signum);
3255}
3256
// Signature of the libc sigaction(2) entry point.
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Resolved lazily via dlsym(RTLD_DEFAULT, "sigaction") so the query
// bypasses any interposed sigaction (e.g. from libjsig).
static os_sigaction_t os_sigaction = NULL;
3260
// Compare the live handler for 'sig' (queried through the un-interposed
// libc sigaction) against the handler the VM expects, warning once if
// user code replaced the handler or changed its flags.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3332
// To install functions for atexit system call
extern "C" {
  // atexit hook: release the perfdata memory at VM exit.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3339
// This is called _before_ the most of global arguments have been parsed.
// Early platform initialization: detect AIX vs. PASE, probe multipage
// support and settle on a single VM page size, then load the platform
// libraries and basic system properties. Ordering matters here.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == 4*K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = 64*K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = 4*K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = 4*K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = 64*K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();

  os::Posix::init();
}
3455
// This is called _after_ the global arguments have been parsed.
//
// Second-phase OS initialization: sets up the safepoint polling page, the
// memory serialize page (if !UseMembar), suspend/resume support, signal
// handling, stack size checks and the file descriptor limit.
// Returns JNI_OK on success, JNI_ERR on any fatal initialization failure.
jint os::init_2(void) {

  os::Posix::init_2();

  if (os::Aix::on_pase()) {
    trcVerbose("Running on PASE.");
  } else {
    trcVerbose("Running on AIX (not PASE).");
  }

  trcVerbose("processor count: %d", os::_processor_count);
  trcVerbose("physical memory: %lu", Aix::_physical_memory);

  // Initially build up the loaded dll map.
  LoadedLibraries::reload();
  if (Verbose) {
    trcVerbose("Loaded Libraries: ");
    LoadedLibraries::print(tty);
  }

  const int page_size = Aix::page_size();
  const int map_size = page_size;

  address map_address = (address) MAP_FAILED;
  const int prot  = PROT_READ;
  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;

  // Use optimized addresses for the polling page,
  // e.g. map it to a special 32-bit address.
  if (OptimizePollingPageLocation) {
    // architecture-specific list of address wishes:
    address address_wishes[] = {
      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
      // PPC64: all address wishes are non-negative 32 bit values where
      // the lower 16 bits are all zero. we can load these addresses
      // with a single ppc_lis instruction.
      (address) 0x30000000, (address) 0x31000000,
      (address) 0x32000000, (address) 0x33000000,
      (address) 0x40000000, (address) 0x41000000,
      (address) 0x42000000, (address) 0x43000000,
      (address) 0x50000000, (address) 0x51000000,
      (address) 0x52000000, (address) 0x53000000,
      (address) 0x60000000, (address) 0x61000000,
      (address) 0x62000000, (address) 0x63000000
    };
    int address_wishes_length = sizeof(address_wishes)/sizeof(address);

    // iterate over the list of address wishes:
    for (int i=0; i<address_wishes_length; i++) {
      // Try to map with current address wish.
      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
      // fail if the address is already mapped.
      // Note: we map one page *below* the wish address; on success the
      // polling page therefore ends exactly at the wished 16-bit-aligned
      // address (see the "+ page_size" comparisons below).
      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
                                     map_size, prot,
                                     flags | MAP_FIXED,
                                     -1, 0);
      trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
                   address_wishes[i], map_address + (ssize_t)page_size);

      if (map_address + (ssize_t)page_size == address_wishes[i]) {
        // Map succeeded and map_address is at wished address, exit loop.
        break;
      }

      if (map_address != (address) MAP_FAILED) {
        // Map succeeded, but polling_page is not at wished address, unmap and continue.
        ::munmap(map_address, map_size);
        map_address = (address) MAP_FAILED;
      }
      // Map failed, continue loop.
    }
  } // end OptimizePollingPageLocation

  // Fall back to letting the kernel pick an address if no wish succeeded
  // (or optimization was disabled).
  if (map_address == (address) MAP_FAILED) {
    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
  }
  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
  os::set_polling_page(map_address);

  if (!UseMembar) {
    // Without membars, a writable page is used to serialize memory on
    // thread state transitions.
    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
    os::set_memory_serialize_page(mem_serialize_page);

    trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
        mem_serialize_page, mem_serialize_page + Aix::page_size(),
        Aix::page_size(), Aix::page_size());
  }

  // initialize suspend/resume support - must do this before signal_sets_init()
  if (SR_initialize() != 0) {
    perror("SR_initialize failed");
    return JNI_ERR;
  }

  Aix::signal_sets_init();
  Aix::install_signal_handlers();

  // Check and sets minimum stack sizes against command line options
  if (Posix::set_minimum_stack_sizes() == JNI_ERR) {
    return JNI_ERR;
  }

  if (UseNUMA) {
    UseNUMA = false;
    warning("NUMA optimizations are not available on this OS.");
  }

  if (MaxFDLimit) {
    // Set the number of file descriptors to max. print out error
    // if getrlimit/setrlimit fails but continue regardless.
    struct rlimit nbr_files;
    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
    if (status != 0) {
      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
    } else {
      nbr_files.rlim_cur = nbr_files.rlim_max;
      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
      if (status != 0) {
        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
      }
    }
  }

  if (PerfAllowAtExitRegistration) {
    // Only register atexit functions if PerfAllowAtExitRegistration is set.
    // At exit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // Note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

  return JNI_OK;
}
3597
3598// Mark the polling page as unreadable
3599void os::make_polling_page_unreadable(void) {
3600  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3601    fatal("Could not disable polling page");
3602  }
3603};
3604
3605// Mark the polling page as readable
3606void os::make_polling_page_readable(void) {
3607  // Changed according to os_linux.cpp.
3608  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3609    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3610  }
3611};
3612
3613int os::active_processor_count() {
3614  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3615  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3616  return online_cpus;
3617}
3618
3619void os::set_native_thread_name(const char *name) {
3620  // Not yet implemented.
3621  return;
3622}
3623
3624bool os::distribute_processes(uint length, uint* distribution) {
3625  // Not yet implemented.
3626  return false;
3627}
3628
3629bool os::bind_to_processor(uint processor_id) {
3630  // Not yet implemented.
3631  return false;
3632}
3633
3634void os::SuspendedThreadTask::internal_do_task() {
3635  if (do_suspend(_thread->osthread())) {
3636    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3637    do_task(context);
3638    do_resume(_thread->osthread());
3639  }
3640}
3641
3642////////////////////////////////////////////////////////////////////////////////
3643// debug support
3644
3645bool os::find(address addr, outputStream* st) {
3646
3647  st->print(PTR_FORMAT ": ", addr);
3648
3649  loaded_module_t lm;
3650  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3651      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3652    st->print_cr("%s", lm.path);
3653    return true;
3654  }
3655
3656  return false;
3657}
3658
3659////////////////////////////////////////////////////////////////////////////////
3660// misc
3661
// This does not do anything on Aix. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32.
// On AIX the Java call is simply forwarded unchanged.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}
3670
// Hook for dumping OS-level statistics at VM shutdown; intentionally a
// no-op on AIX.
void os::print_statistics() {
}
3673
3674bool os::message_box(const char* title, const char* message) {
3675  int i;
3676  fdStream err(defaultStream::error_fd());
3677  for (i = 0; i < 78; i++) err.print_raw("=");
3678  err.cr();
3679  err.print_raw_cr(title);
3680  for (i = 0; i < 78; i++) err.print_raw("-");
3681  err.cr();
3682  err.print_raw_cr(message);
3683  for (i = 0; i < 78; i++) err.print_raw("=");
3684  err.cr();
3685
3686  char buf[16];
3687  // Prevent process from exiting upon "read error" without consuming all CPU
3688  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3689
3690  return buf[0] == 'y' || buf[0] == 'Y';
3691}
3692
3693int os::stat(const char *path, struct stat *sbuf) {
3694  char pathbuf[MAX_PATH];
3695  if (strlen(path) > MAX_PATH - 1) {
3696    errno = ENAMETOOLONG;
3697    return -1;
3698  }
3699  os::native_path(strcpy(pathbuf, path));
3700  return ::stat(pathbuf, sbuf);
3701}
3702
3703// Is a (classpath) directory empty?
3704bool os::dir_is_empty(const char* path) {
3705  DIR *dir = NULL;
3706  struct dirent *ptr;
3707
3708  dir = opendir(path);
3709  if (dir == NULL) return true;
3710
3711  /* Scan the directory */
3712  bool result = true;
3713  char buf[sizeof(struct dirent) + MAX_PATH];
3714  while (result && (ptr = ::readdir(dir)) != NULL) {
3715    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3716      result = false;
3717    }
3718  }
3719  closedir(dir);
3720  return result;
3721}
3722
3723// This code originates from JDK's sysOpen and open64_w
3724// from src/solaris/hpi/src/system_md.c
3725
3726int os::open(const char *path, int oflag, int mode) {
3727
3728  if (strlen(path) > MAX_PATH - 1) {
3729    errno = ENAMETOOLONG;
3730    return -1;
3731  }
3732  int fd;
3733
3734  fd = ::open64(path, oflag, mode);
3735  if (fd == -1) return -1;
3736
3737  // If the open succeeded, the file might still be a directory.
3738  {
3739    struct stat64 buf64;
3740    int ret = ::fstat64(fd, &buf64);
3741    int st_mode = buf64.st_mode;
3742
3743    if (ret != -1) {
3744      if ((st_mode & S_IFMT) == S_IFDIR) {
3745        errno = EISDIR;
3746        ::close(fd);
3747        return -1;
3748      }
3749    } else {
3750      ::close(fd);
3751      return -1;
3752    }
3753  }
3754
3755  // All file descriptors that are opened in the JVM and not
3756  // specifically destined for a subprocess should have the
3757  // close-on-exec flag set. If we don't set it, then careless 3rd
3758  // party native code might fork and exec without closing all
3759  // appropriate file descriptors (e.g. as we do in closeDescriptors in
3760  // UNIXProcess.c), and this in turn might:
3761  //
3762  // - cause end-of-file to fail to be detected on some file
3763  //   descriptors, resulting in mysterious hangs, or
3764  //
3765  // - might cause an fopen in the subprocess to fail on a system
3766  //   suffering from bug 1085341.
3767  //
3768  // (Yes, the default setting of the close-on-exec flag is a Unix
3769  // design flaw.)
3770  //
3771  // See:
3772  // 1085341: 32-bit stdio routines should support file descriptors >255
3773  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3774  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3775#ifdef FD_CLOEXEC
3776  {
3777    int flags = ::fcntl(fd, F_GETFD);
3778    if (flags != -1)
3779      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3780  }
3781#endif
3782
3783  return fd;
3784}
3785
3786// create binary file, rewriting existing file if required
3787int os::create_binary_file(const char* path, bool rewrite_existing) {
3788  int oflags = O_WRONLY | O_CREAT;
3789  if (!rewrite_existing) {
3790    oflags |= O_EXCL;
3791  }
3792  return ::open64(path, oflags, S_IREAD | S_IWRITE);
3793}
3794
3795// return current position of file pointer
3796jlong os::current_file_offset(int fd) {
3797  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3798}
3799
3800// move file pointer to the specified offset
3801jlong os::seek_to_file_offset(int fd, jlong offset) {
3802  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3803}
3804
// This code originates from JDK's sysAvailable
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
//
// Estimate the number of bytes available for reading on 'fd' without
// blocking. Returns 1 on success (result in *bytes), 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  // For character devices, FIFOs and sockets use FIONREAD, which reports
  // the bytes queued for reading directly.
  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Otherwise compute (end - current) via three seeks, restoring the
  // original file position before returning. The order matters: the
  // final lseek64 restores 'cur' saved by the first.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
3833
3834// Map a block of memory.
3835char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3836                        char *addr, size_t bytes, bool read_only,
3837                        bool allow_exec) {
3838  int prot;
3839  int flags = MAP_PRIVATE;
3840
3841  if (read_only) {
3842    prot = PROT_READ;
3843    flags = MAP_SHARED;
3844  } else {
3845    prot = PROT_READ | PROT_WRITE;
3846    flags = MAP_PRIVATE;
3847  }
3848
3849  if (allow_exec) {
3850    prot |= PROT_EXEC;
3851  }
3852
3853  if (addr != NULL) {
3854    flags |= MAP_FIXED;
3855  }
3856
3857  // Allow anonymous mappings if 'fd' is -1.
3858  if (fd == -1) {
3859    flags |= MAP_ANONYMOUS;
3860  }
3861
3862  char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3863                                     fd, file_offset);
3864  if (mapped_address == MAP_FAILED) {
3865    return NULL;
3866  }
3867  return mapped_address;
3868}
3869
3870// Remap a block of memory.
3871char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3872                          char *addr, size_t bytes, bool read_only,
3873                          bool allow_exec) {
3874  // same as map_memory() on this OS
3875  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3876                        allow_exec);
3877}
3878
3879// Unmap a block of memory.
3880bool os::pd_unmap_memory(char* addr, size_t bytes) {
3881  return munmap(addr, bytes) == 0;
3882}
3883
3884// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3885// are used by JVM M&M and JVMTI to get user+sys or user CPU time
3886// of a thread.
3887//
3888// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3889// the fast estimate available on the platform.
3890
3891jlong os::current_thread_cpu_time() {
3892  // return user + sys since the cost is the same
3893  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3894  assert(n >= 0, "negative CPU time");
3895  return n;
3896}
3897
3898jlong os::thread_cpu_time(Thread* thread) {
3899  // consistent with what current_thread_cpu_time() returns
3900  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
3901  assert(n >= 0, "negative CPU time");
3902  return n;
3903}
3904
3905jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
3906  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
3907  assert(n >= 0, "negative CPU time");
3908  return n;
3909}
3910
3911static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
3912  bool error = false;
3913
3914  jlong sys_time = 0;
3915  jlong user_time = 0;
3916
3917  // Reimplemented using getthrds64().
3918  //
3919  // Works like this:
3920  // For the thread in question, get the kernel thread id. Then get the
3921  // kernel thread statistics using that id.
3922  //
3923  // This only works of course when no pthread scheduling is used,
3924  // i.e. there is a 1:1 relationship to kernel threads.
3925  // On AIX, see AIXTHREAD_SCOPE variable.
3926
3927  pthread_t pthtid = thread->osthread()->pthread_id();
3928
3929  // retrieve kernel thread id for the pthread:
3930  tid64_t tid = 0;
3931  struct __pthrdsinfo pinfo;
3932  // I just love those otherworldly IBM APIs which force me to hand down
3933  // dummy buffers for stuff I dont care for...
3934  char dummy[1];
3935  int dummy_size = sizeof(dummy);
3936  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
3937                          dummy, &dummy_size) == 0) {
3938    tid = pinfo.__pi_tid;
3939  } else {
3940    tty->print_cr("pthread_getthrds_np failed.");
3941    error = true;
3942  }
3943
3944  // retrieve kernel timing info for that kernel thread
3945  if (!error) {
3946    struct thrdentry64 thrdentry;
3947    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
3948      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
3949      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
3950    } else {
3951      tty->print_cr("pthread_getthrds_np failed.");
3952      error = true;
3953    }
3954  }
3955
3956  if (p_sys_time) {
3957    *p_sys_time = sys_time;
3958  }
3959
3960  if (p_user_time) {
3961    *p_user_time = user_time;
3962  }
3963
3964  if (error) {
3965    return false;
3966  }
3967
3968  return true;
3969}
3970
3971jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
3972  jlong sys_time;
3973  jlong user_time;
3974
3975  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
3976    return -1;
3977  }
3978
3979  return user_sys_cpu_time ? sys_time + user_time : user_time;
3980}
3981
3982void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
3983  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
3984  info_ptr->may_skip_backward = false;     // elapsed time not wall time
3985  info_ptr->may_skip_forward = false;      // elapsed time not wall time
3986  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
3987}
3988
3989void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
3990  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
3991  info_ptr->may_skip_backward = false;     // elapsed time not wall time
3992  info_ptr->may_skip_forward = false;      // elapsed time not wall time
3993  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
3994}
3995
// Per-thread CPU time is supported on AIX (implemented via getthrds64()).
bool os::is_thread_cpu_time_supported() {
  return true;
}
3999
4000// System loadavg support. Returns -1 if load average cannot be obtained.
4001// For now just return the system wide load average (no processor sets).
4002int os::loadavg(double values[], int nelem) {
4003
4004  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4005  guarantee(values, "argument error");
4006
4007  if (os::Aix::on_pase()) {
4008
4009    // AS/400 PASE: use libo4 porting library
4010    double v[3] = { 0.0, 0.0, 0.0 };
4011
4012    if (libo4::get_load_avg(v, v + 1, v + 2)) {
4013      for (int i = 0; i < nelem; i ++) {
4014        values[i] = v[i];
4015      }
4016      return nelem;
4017    } else {
4018      return -1;
4019    }
4020
4021  } else {
4022
4023    // AIX: use libperfstat
4024    libperfstat::cpuinfo_t ci;
4025    if (libperfstat::get_cpuinfo(&ci)) {
4026      for (int i = 0; i < nelem; i++) {
4027        values[i] = ci.loadavg[i];
4028      }
4029    } else {
4030      return -1;
4031    }
4032    return nelem;
4033  }
4034}
4035
4036void os::pause() {
4037  char filename[MAX_PATH];
4038  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4039    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4040  } else {
4041    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4042  }
4043
4044  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4045  if (fd != -1) {
4046    struct stat buf;
4047    ::close(fd);
4048    while (::stat(filename, &buf) == 0) {
4049      (void)::poll(NULL, 0, 100);
4050    }
4051  } else {
4052    trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4053  }
4054}
4055
4056bool os::Aix::is_primordial_thread() {
4057  if (pthread_self() == (pthread_t)1) {
4058    return true;
4059  } else {
4060    return false;
4061  }
4062}
4063
4064// OS recognitions (PASE/AIX, OS level) call this before calling any
4065// one of Aix::on_pase(), Aix::os_version() static
4066void os::Aix::initialize_os_info() {
4067
4068  assert(_on_pase == -1 && _os_version == 0, "already called.");
4069
4070  struct utsname uts;
4071  memset(&uts, 0, sizeof(uts));
4072  strcpy(uts.sysname, "?");
4073  if (::uname(&uts) == -1) {
4074    trcVerbose("uname failed (%d)", errno);
4075    guarantee(0, "Could not determine whether we run on AIX or PASE");
4076  } else {
4077    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4078               "node \"%s\" machine \"%s\"\n",
4079               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4080    const int major = atoi(uts.version);
4081    assert(major > 0, "invalid OS version");
4082    const int minor = atoi(uts.release);
4083    assert(minor > 0, "invalid OS release");
4084    _os_version = (major << 24) | (minor << 16);
4085    char ver_str[20] = {0};
4086    char *name_str = "unknown OS";
4087    if (strcmp(uts.sysname, "OS400") == 0) {
4088      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4089      _on_pase = 1;
4090      if (os_version_short() < 0x0504) {
4091        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4092        assert(false, "OS/400 release too old.");
4093      }
4094      name_str = "OS/400 (pase)";
4095      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4096    } else if (strcmp(uts.sysname, "AIX") == 0) {
4097      // We run on AIX. We do not support versions older than AIX 5.3.
4098      _on_pase = 0;
4099      // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4100      odmWrapper::determine_os_kernel_version(&_os_version);
4101      if (os_version_short() < 0x0503) {
4102        trcVerbose("AIX release older than AIX 5.3 not supported.");
4103        assert(false, "AIX release too old.");
4104      }
4105      name_str = "AIX";
4106      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4107                   major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4108    } else {
4109      assert(false, name_str);
4110    }
4111    trcVerbose("We run on %s %s", name_str, ver_str);
4112  }
4113
4114  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4115} // end: os::Aix::initialize_os_info()
4116
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
void os::Aix::scan_environment() {

  char* p;
  int rc;

  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
  // This switch was needed on AIX 32bit, but on AIX 64bit the general
  // recommendation is (in OSS notes) to switch it off.
  p = ::getenv("EXTSHM");
  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
  if (p && strcasecmp(p, "ON") == 0) {
    _extshm = 1;
    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
    if (!AllowExtshm) {
      // We allow under certain conditions the user to continue. However, we want this
      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
      // that the VM is not able to allocate 64k pages for the heap.
      // We do not want to run with reduced performance.
      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
    }
  } else {
    _extshm = 0;
  }

  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
  // Not tested, not supported.
  //
  // Note that it might be worth the trouble to test and to require it, if only to
  // get useful return codes for mprotect.
  //
  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
  // exec() ? before loading the libjvm ? ....)
  p = ::getenv("XPG_SUS_ENV");
  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
  if (p && strcmp(p, "ON") == 0) {
    _xpg_sus_mode = 1;
    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
    // clobber address ranges. If we ever want to support that, we have to do some
    // testing first.
    guarantee(false, "XPG_SUS_ENV=ON not supported");
  } else {
    _xpg_sus_mode = 0;
  }

  // Only traced for diagnostic purposes on PASE.
  if (os::Aix::on_pase()) {
    p = ::getenv("QIBM_MULTI_THREADED");
    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
  }

  p = ::getenv("LDR_CNTRL");
  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
  // TEXTPSIZE in LDR_CNTRL is known to cause trouble on OS/400 V7R1.
  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
    if (p && ::strstr(p, "TEXTPSIZE")) {
      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
        "you may experience hangs or crashes on OS/400 V7R1.");
    }
  }

  p = ::getenv("AIXTHREAD_GUARDPAGES");
  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");

} // end: os::Aix::scan_environment()
4187
4188// PASE: initialize the libo4 library (PASE porting library).
4189void os::Aix::initialize_libo4() {
4190  guarantee(os::Aix::on_pase(), "OS/400 only.");
4191  if (!libo4::init()) {
4192    trcVerbose("libo4 initialization failed.");
4193    assert(false, "libo4 initialization failed");
4194  } else {
4195    trcVerbose("libo4 initialized.");
4196  }
4197}
4198
4199// AIX: initialize the libperfstat library.
4200void os::Aix::initialize_libperfstat() {
4201  assert(os::Aix::on_aix(), "AIX only");
4202  if (!libperfstat::init()) {
4203    trcVerbose("libperfstat initialization failed.");
4204    assert(false, "libperfstat initialization failed");
4205  } else {
4206    trcVerbose("libperfstat initialized.");
4207  }
4208}
4209
4210/////////////////////////////////////////////////////////////////////////////
4211// thread stack
4212
4213// Get the current stack base from the OS (actually, the pthread library).
4214// Note: usually not page aligned.
4215address os::current_stack_base() {
4216  AixMisc::stackbounds_t bounds;
4217  bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4218  guarantee(rc, "Unable to retrieve stack bounds.");
4219  return bounds.base;
4220}
4221
4222// Get the current stack size from the OS (actually, the pthread library).
4223// Returned size is such that (base - size) is always aligned to page size.
4224size_t os::current_stack_size() {
4225  AixMisc::stackbounds_t bounds;
4226  bool rc = AixMisc::query_stack_bounds_for_current_thread(&bounds);
4227  guarantee(rc, "Unable to retrieve stack bounds.");
4228  // Align the returned stack size such that the stack low address
4229  // is aligned to page size (Note: base is usually not and we do not care).
4230  // We need to do this because caller code will assume stack low address is
4231  // page aligned and will place guard pages without checking.
4232  address low = bounds.base - bounds.size;
4233  address low_aligned = (address)align_up(low, os::vm_page_size());
4234  size_t s = bounds.base - low_aligned;
4235  return s;
4236}
4237
4238extern char** environ;
4239
4240// Run the specified command in a separate process. Return its exit value,
4241// or -1 on failure (e.g. can't fork a new process).
4242// Unlike system(), this function can be called from signal handler. It
4243// doesn't block SIGINT et al.
4244int os::fork_and_exec(char* cmd) {
4245  char * argv[4] = {"sh", "-c", cmd, NULL};
4246
4247  pid_t pid = fork();
4248
4249  if (pid < 0) {
4250    // fork failed
4251    return -1;
4252
4253  } else if (pid == 0) {
4254    // child process
4255
4256    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4257    execve("/usr/bin/sh", argv, environ);
4258
4259    // execve failed
4260    _exit(-1);
4261
4262  } else {
4263    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4264    // care about the actual exit code, for now.
4265
4266    int status;
4267
4268    // Wait for the child process to exit. This returns immediately if
4269    // the child has already exited. */
4270    while (waitpid(pid, &status, 0) < 0) {
4271      switch (errno) {
4272        case ECHILD: return 0;
4273        case EINTR: break;
4274        default: return -1;
4275      }
4276    }
4277
4278    if (WIFEXITED(status)) {
4279      // The child exited normally; get its exit code.
4280      return WEXITSTATUS(status);
4281    } else if (WIFSIGNALED(status)) {
4282      // The child exited because of a signal.
4283      // The best value to return is 0x80 + signal number,
4284      // because that is what all Unix shells do, and because
4285      // it allows callers to distinguish between process exit and
4286      // process death by signal.
4287      return 0x80 + WTERMSIG(status);
4288    } else {
4289      // Unknown exit code; pass it through.
4290      return status;
4291    }
4292  }
4293  return -1;
4294}
4295
4296// is_headless_jre()
4297//
4298// Test for the existence of xawt/libmawt.so or libawt_xawt.so
4299// in order to report if we are running in a headless jre.
4300//
4301// Since JDK8 xawt/libmawt.so is moved into the same directory
4302// as libawt.so, and renamed libawt_xawt.so
4303bool os::is_headless_jre() {
4304  struct stat statbuf;
4305  char buf[MAXPATHLEN];
4306  char libmawtpath[MAXPATHLEN];
4307  const char *xawtstr = "/xawt/libmawt.so";
4308  const char *new_xawtstr = "/libawt_xawt.so";
4309
4310  char *p;
4311
4312  // Get path to libjvm.so
4313  os::jvm_path(buf, sizeof(buf));
4314
4315  // Get rid of libjvm.so
4316  p = strrchr(buf, '/');
4317  if (p == NULL) return false;
4318  else *p = '\0';
4319
4320  // Get rid of client or server
4321  p = strrchr(buf, '/');
4322  if (p == NULL) return false;
4323  else *p = '\0';
4324
4325  // check xawt/libmawt.so
4326  strcpy(libmawtpath, buf);
4327  strcat(libmawtpath, xawtstr);
4328  if (::stat(libmawtpath, &statbuf) == 0) return false;
4329
4330  // check libawt_xawt.so
4331  strcpy(libmawtpath, buf);
4332  strcat(libmawtpath, new_xawtstr);
4333  if (::stat(libmawtpath, &statbuf) == 0) return false;
4334
4335  return true;
4336}
4337
4338// Get the default path to the core file
4339// Returns the length of the string
4340int os::get_core_path(char* buffer, size_t bufferSize) {
4341  const char* p = get_current_directory(buffer, bufferSize);
4342
4343  if (p == NULL) {
4344    assert(p != NULL, "failed to get current directory");
4345    return 0;
4346  }
4347
4348  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4349                                               p, current_process_id());
4350
4351  return strlen(buffer);
4352}
4353
#ifndef PRODUCT
// Large-page/special-reservation self-test hook; AIX provides no
// platform-specific cases, so this is intentionally a no-op.
void TestReserveMemorySpecial_test() {
}
#endif
4359
4360bool os::start_debugging(char *buf, int buflen) {
4361  int len = (int)strlen(buf);
4362  char *p = &buf[len];
4363
4364  jio_snprintf(p, buflen -len,
4365                 "\n\n"
4366                 "Do you want to debug the problem?\n\n"
4367                 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4368                 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4369                 "Otherwise, press RETURN to abort...",
4370                 os::current_process_id(),
4371                 os::current_thread_id(), thread_self());
4372
4373  bool yes = os::message_box("Unexpected Error", buf);
4374
4375  if (yes) {
4376    // yes, user asked VM to launch debugger
4377    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4378
4379    os::fork_and_exec(buf);
4380    yes = false;
4381  }
4382  return yes;
4383}
4384
4385static inline time_t get_mtime(const char* filename) {
4386  struct stat st;
4387  int ret = os::stat(filename, &st);
4388  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4389  return st.st_mtime;
4390}
4391
4392int os::compare_file_modified_times(const char* file1, const char* file2) {
4393  time_t t1 = get_mtime(file1);
4394  time_t t2 = get_mtime(file2);
4395  return t1 - t2;
4396}
4397