os_aix.cpp revision 9867:3125c4a60cc9
1/*
2 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2012, 2015 SAP AG. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26// According to the AIX OS doc #pragma alloca must be used
27// with C++ compiler before referencing the function alloca()
28#pragma alloca
29
30// no precompiled headers
31#include "classfile/classLoader.hpp"
32#include "classfile/systemDictionary.hpp"
33#include "classfile/vmSymbols.hpp"
34#include "code/icBuffer.hpp"
35#include "code/vtableStubs.hpp"
36#include "compiler/compileBroker.hpp"
37#include "interpreter/interpreter.hpp"
38#include "jvm_aix.h"
39#include "libo4.hpp"
40#include "libperfstat_aix.hpp"
41#include "libodm_aix.hpp"
42#include "loadlib_aix.hpp"
43#include "memory/allocation.inline.hpp"
44#include "memory/filemap.hpp"
45#include "misc_aix.hpp"
46#include "mutex_aix.inline.hpp"
47#include "oops/oop.inline.hpp"
48#include "os_aix.inline.hpp"
49#include "os_share_aix.hpp"
50#include "porting_aix.hpp"
51#include "prims/jniFastGetField.hpp"
52#include "prims/jvm.h"
53#include "prims/jvm_misc.hpp"
54#include "runtime/arguments.hpp"
55#include "runtime/atomic.inline.hpp"
56#include "runtime/extendedPC.hpp"
57#include "runtime/globals.hpp"
58#include "runtime/interfaceSupport.hpp"
59#include "runtime/java.hpp"
60#include "runtime/javaCalls.hpp"
61#include "runtime/mutexLocker.hpp"
62#include "runtime/objectMonitor.hpp"
63#include "runtime/orderAccess.inline.hpp"
64#include "runtime/os.hpp"
65#include "runtime/osThread.hpp"
66#include "runtime/perfMemory.hpp"
67#include "runtime/sharedRuntime.hpp"
68#include "runtime/statSampler.hpp"
69#include "runtime/stubRoutines.hpp"
70#include "runtime/thread.inline.hpp"
71#include "runtime/threadCritical.hpp"
72#include "runtime/timer.hpp"
73#include "runtime/vm_version.hpp"
74#include "services/attachListener.hpp"
75#include "services/runtimeService.hpp"
76#include "utilities/decoder.hpp"
77#include "utilities/defaultStream.hpp"
78#include "utilities/events.hpp"
79#include "utilities/growableArray.hpp"
80#include "utilities/vmError.hpp"
81
82// put OS-includes here (sorted alphabetically)
83#include <errno.h>
84#include <fcntl.h>
85#include <inttypes.h>
86#include <poll.h>
87#include <procinfo.h>
88#include <pthread.h>
89#include <pwd.h>
90#include <semaphore.h>
91#include <signal.h>
92#include <stdint.h>
93#include <stdio.h>
94#include <string.h>
95#include <unistd.h>
96#include <sys/ioctl.h>
97#include <sys/ipc.h>
98#include <sys/mman.h>
99#include <sys/resource.h>
100#include <sys/select.h>
101#include <sys/shm.h>
102#include <sys/socket.h>
103#include <sys/stat.h>
104#include <sys/sysinfo.h>
105#include <sys/systemcfg.h>
106#include <sys/time.h>
107#include <sys/times.h>
108#include <sys/types.h>
109#include <sys/utsname.h>
110#include <sys/vminfo.h>
111#include <sys/wait.h>
112
113// Missing prototypes for various system APIs.
114extern "C"
115int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
116
117#if !defined(_AIXVERSION_610)
118extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
119extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
120extern "C" int getargs   (procsinfo*, int, char*, int);
121#endif
122
123#define MAX_PATH (2 * K)
124
125// for timer info max values which include all bits
126#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
127// for multipage initialization error analysis (in 'g_multipage_error')
128#define ERROR_MP_OS_TOO_OLD                          100
129#define ERROR_MP_EXTSHM_ACTIVE                       101
130#define ERROR_MP_VMGETINFO_FAILED                    102
131#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
132
133// The semantics in this file are thus that codeptr_t is a *real code ptr*.
134// This means that any function taking codeptr_t as arguments will assume
135// a real codeptr and won't handle function descriptors (eg getFuncName),
136// whereas functions taking address as args will deal with function
137// descriptors (eg os::dll_address_to_library_name).
138typedef unsigned int* codeptr_t;
139
140// Typedefs for stackslots, stack pointers, pointers to op codes.
141typedef unsigned long stackslot_t;
142typedef stackslot_t* stackptr_t;
143
144// Query dimensions of the stack of the calling thread.
145static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
146static address resolve_function_descriptor_to_code_pointer(address p);
147
148// Function to check a given stack pointer against given stack limits.
149inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
150  if (((uintptr_t)sp) & 0x7) {
151    return false;
152  }
153  if (sp > stack_base) {
154    return false;
155  }
156  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
157    return false;
158  }
159  return true;
160}
161
162// Returns true if function is a valid codepointer.
163inline bool is_valid_codepointer(codeptr_t p) {
164  if (!p) {
165    return false;
166  }
167  if (((uintptr_t)p) & 0x3) {
168    return false;
169  }
170  if (LoadedLibraries::find_for_text_address(p, NULL) == NULL) {
171    return false;
172  }
173  return true;
174}
175
// Macro to check a given stack pointer against given stack limits and to die if test fails.
// Expands is_valid_stackpointer() into a fatal guarantee().
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// Macro to check the current stack pointer against given stacklimits.
// Used as a sanity check after querying/setting a thread's stack dimensions.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
187
188static void vmembk_print_on(outputStream* os);
189
////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

// Total physical memory in bytes; set in initialize_system_info().
julong    os::Aix::_physical_memory = 0;

// pthread id of the thread that initialized the VM.
pthread_t os::Aix::_main_thread = ((pthread_t)0);
// -1 = uninitialized.
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 pase
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
uint32_t  os::Aix::_os_version = 0;

// -1 = uninitialized.
int       os::Aix::_stack_page_size = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

// Time counter recorded at VM startup for elapsed-time computations.
static jlong    initial_time_count = 0;
// Clock ticks per second; starts at the common AIX default of 100.
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;
229
230// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
232// therefore should not be defined in AIX class.
233//
234// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
235// latter two (16M "large" resp. 16G "huge" pages) require special
236// setup and are normally not available.
237//
238// AIX supports multiple page sizes per process, for:
239//  - Stack (of the primordial thread, so not relevant for us)
240//  - Data - data, bss, heap, for us also pthread stacks
241//  - Text - text code
242//  - shared memory
243//
244// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
245// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
246//
247// For shared memory, page size can be set dynamically via
248// shmctl(). Different shared memory regions can have different page
249// sizes.
250//
// More information can be found at the IBM info center:
252//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
253//
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,                // pagesize: -1 = not yet queried
  (size_t) -1,                // datapsize
  (size_t) -1,                // shmpsize
  (size_t) -1,                // pthr_stack_pagesize
  (size_t) -1,                // textpsize
  false, false,               // can_use_64K_pages, can_use_16M_pages
  0                           // error (0 = no error; see ERROR_MP_* codes)
};
272
273// We must not accidentally allocate memory close to the BRK - even if
274// that would work - because then we prevent the BRK segment from
275// growing which may result in a malloc OOM even though there is
276// enough memory. The problem only arises if we shmat() or mmap() at
277// a specific wish address, e.g. to place the heap in a
278// compressed-oops-friendly way.
279static bool is_close_to_brk(address a) {
280  assert0(g_brk_at_startup != NULL);
281  if (a >= g_brk_at_startup &&
282      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
283    return true;
284  }
285  return false;
286}
287
// Returns the amount of free physical memory in bytes (platform-independent entry point).
julong os::available_memory() {
  return Aix::available_memory();
}
291
// Returns free real memory in bytes, 0 on PASE, or ULONG_MAX if the
// memory information could not be retrieved.
julong os::Aix::available_memory() {
  // Avoid expensive API call here, as returned value will always be null.
  if (os::Aix::on_pase()) {
    return 0x0LL;
  }
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    // Query failed: report "everything available" rather than zero.
    return ULONG_MAX;
  }
}
304
// Returns total physical memory in bytes (cached at initialization).
julong os::physical_memory() {
  return Aix::physical_memory();
}
308
// Returns true if the process runs with special privileges, i.e. the real and
// effective user (or group) ids differ, as for a set-uid/set-gid executable.
// Note: a plain root login (uid == euid == 0) does not satisfy this test.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    // Compute once and cache; the ids are not expected to change afterwards.
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}
320
321// Helper function, emulates disclaim64 using multiple 32bit disclaims
322// because we cannot use disclaim64() on AS/400 and old AIX releases.
323static bool my_disclaim64(char* addr, size_t size) {
324
325  if (size == 0) {
326    return true;
327  }
328
329  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
330  const unsigned int maxDisclaimSize = 0x40000000;
331
332  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
333  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
334
335  char* p = addr;
336
337  for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
338    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
339      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
340      return false;
341    }
342    p += maxDisclaimSize;
343  }
344
345  if (lastDisclaimSize > 0) {
346    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
347      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
348      return false;
349    }
350  }
351
352  return true;
353}
354
// Cpu architecture string, selected at compile time. Used e.g. when
// composing library paths. Only 32- and 64-bit PowerPC are supported.
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
363
364// Wrap the function "vmgetinfo" which is not available on older OS releases.
365static int checked_vmgetinfo(void *out, int command, int arg) {
366  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
367    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
368  }
369  return ::vmgetinfo(out, command, arg);
370}
371
372// Given an address, returns the size of the page backing that address.
373size_t os::Aix::query_pagesize(void* addr) {
374
375  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
376    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
377    return SIZE_4K;
378  }
379
380  vm_page_info pi;
381  pi.addr = (uint64_t)addr;
382  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
383    return pi.pagesize;
384  } else {
385    assert(false, "vmgetinfo failed to retrieve page size");
386    return SIZE_4K;
387  }
388}
389
390void os::Aix::initialize_system_info() {
391
392  // Get the number of online(logical) cpus instead of configured.
393  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
394  assert(_processor_count > 0, "_processor_count must be > 0");
395
396  // Retrieve total physical storage.
397  os::Aix::meminfo_t mi;
398  if (!os::Aix::get_meminfo(&mi)) {
399    assert(false, "os::Aix::get_meminfo failed.");
400  }
401  _physical_memory = (julong) mi.real_total;
402}
403
404// Helper function for tracing page sizes.
405static const char* describe_pagesize(size_t pagesize) {
406  switch (pagesize) {
407    case SIZE_4K : return "4K";
408    case SIZE_64K: return "64K";
409    case SIZE_16M: return "16M";
410    case SIZE_16G: return "16G";
411    default:
412      assert(false, "surprise");
413      return "??";
414  }
415}
416
417// Probe OS for multipage support.
418// Will fill the global g_multipage_support structure.
419// Must be called before calling os::large_page_init().
420static void query_multipage_support() {
421
422  guarantee(g_multipage_support.pagesize == -1,
423            "do not call twice");
424
425  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);
426
427  // This really would surprise me.
428  assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");
429
430  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
431  // Default data page size is defined either by linker options (-bdatapsize)
432  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
433  // default should be 4K.
434  {
435    void* p = ::malloc(SIZE_16M);
436    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
437    ::free(p);
438  }
439
440  // Query default shm page size (LDR_CNTRL SHMPSIZE).
441  // Note that this is pure curiosity. We do not rely on default page size but set
442  // our own page size after allocated.
443  {
444    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
445    guarantee(shmid != -1, "shmget failed");
446    void* p = ::shmat(shmid, NULL, 0);
447    ::shmctl(shmid, IPC_RMID, NULL);
448    guarantee(p != (void*) -1, "shmat failed");
449    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
450    ::shmdt(p);
451  }
452
453  // Before querying the stack page size, make sure we are not running as primordial
454  // thread (because primordial thread's stack may have different page size than
455  // pthread thread stacks). Running a VM on the primordial thread won't work for a
456  // number of reasons so we may just as well guarantee it here.
457  guarantee0(!os::Aix::is_primordial_thread());
458
459  // Query pthread stack page size. Should be the same as data page size because
460  // pthread stacks are allocated from C-Heap.
461  {
462    int dummy = 0;
463    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
464  }
465
466  // Query default text page size (LDR_CNTRL TEXTPSIZE).
467  {
468    address any_function =
469      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
470    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
471  }
472
473  // Now probe for support of 64K pages and 16M pages.
474
475  // Before OS/400 V6R1, there is no support for pages other than 4K.
476  if (os::Aix::on_pase_V5R4_or_older()) {
477    trcVerbose("OS/400 < V6R1 - no large page support.");
478    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
479    goto query_multipage_support_end;
480  }
481
482  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
483  {
484    const int MAX_PAGE_SIZES = 4;
485    psize_t sizes[MAX_PAGE_SIZES];
486    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
487    if (num_psizes == -1) {
488      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
489      trcVerbose("disabling multipage support.");
490      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
491      goto query_multipage_support_end;
492    }
493    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
494    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
495    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
496    for (int i = 0; i < num_psizes; i ++) {
497      trcVerbose(" %s ", describe_pagesize(sizes[i]));
498    }
499
500    // Can we use 64K, 16M pages?
501    for (int i = 0; i < num_psizes; i ++) {
502      const size_t pagesize = sizes[i];
503      if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
504        continue;
505      }
506      bool can_use = false;
507      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
508      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
509        IPC_CREAT | S_IRUSR | S_IWUSR);
510      guarantee0(shmid != -1); // Should always work.
511      // Try to set pagesize.
512      struct shmid_ds shm_buf = { 0 };
513      shm_buf.shm_pagesize = pagesize;
514      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
515        const int en = errno;
516        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
517        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%n",
518          errno);
519      } else {
520        // Attach and double check pageisze.
521        void* p = ::shmat(shmid, NULL, 0);
522        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
523        guarantee0(p != (void*) -1); // Should always work.
524        const size_t real_pagesize = os::Aix::query_pagesize(p);
525        if (real_pagesize != pagesize) {
526          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
527        } else {
528          can_use = true;
529        }
530        ::shmdt(p);
531      }
532      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
533      if (pagesize == SIZE_64K) {
534        g_multipage_support.can_use_64K_pages = can_use;
535      } else if (pagesize == SIZE_16M) {
536        g_multipage_support.can_use_16M_pages = can_use;
537      }
538    }
539
540  } // end: check which pages can be used for shared memory
541
542query_multipage_support_end:
543
544  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
545      describe_pagesize(g_multipage_support.pagesize));
546  trcVerbose("Data page size (C-Heap, bss, etc): %s",
547      describe_pagesize(g_multipage_support.datapsize));
548  trcVerbose("Text page size: %s",
549      describe_pagesize(g_multipage_support.textpsize));
550  trcVerbose("Thread stack page size (pthread): %s",
551      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
552  trcVerbose("Default shared memory page size: %s",
553      describe_pagesize(g_multipage_support.shmpsize));
554  trcVerbose("Can use 64K pages dynamically with shared meory: %s",
555      (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
556  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
557      (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
558  trcVerbose("Multipage error details: %d",
559      g_multipage_support.error);
560
561  // sanity checks
562  assert0(g_multipage_support.pagesize == SIZE_4K);
563  assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
564  assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
565  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
566  assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);
567
568}
569
// Compute and publish the system properties derived from the location of
// libjvm.so: java_home, dll_dir, boot class path, library path and the
// extensions directories.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    // NOTE(review): 'pslash' still holds the result of the strrchr before
    // set_dll_dir(); presumably the intent is "only keep stripping if the
    // previous component existed" - confirm against the other os_*.cpp ports.
    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}
639
640////////////////////////////////////////////////////////////////////////////////
641// breakpoint support
642
// Trigger a programmatic breakpoint (expands to the platform BREAKPOINT trap).
void os::breakpoint() {
  BREAKPOINT;
}
646
// Empty C-linkage function; exists only so a debugger can set a breakpoint on it.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
650
651////////////////////////////////////////////////////////////////////////////////
652// signal support
653
// Set to true by signal_sets_init() (debug builds only); guards the accessors.
debug_only(static bool signal_sets_initialized = false);
// Signal sets filled in once by signal_sets_init().
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
656
657bool os::Aix::is_sig_ignored(int sig) {
658  struct sigaction oact;
659  sigaction(sig, (struct sigaction*)NULL, &oact);
660  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
661    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
662  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
663    return true;
664  } else {
665    return false;
666  }
667}
668
// Builds the three signal sets used by hotspot_sigmask(). Must run once,
// before any additional threads are created.
void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  // The error signals plus SIGTRAP/SIGDANGER and the suspend/resume signal
  // must always be deliverable to any thread.
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  // Shutdown signals are only claimed if the user has not ignored them and
  // has not asked the VM to stay away from them (-Xrs).
  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}
715
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
722
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
729
// These are signals that are blocked during cond_wait to allow debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
735
// Installs the HotSpot signal mask on the given (current) thread, saving the
// caller's mask in the OSThread so it can be restored later.
void os::Aix::hotspot_sigmask(Thread* thread) {

  //Save caller's signal mask before setting VM signal mask
  // (SIG_BLOCK with a NULL set only queries the current mask).
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  // Make sure the signals the VM must see are deliverable to this thread.
  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
757
// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
// On PASE the libo4 porting library is queried; on AIX proper, libperfstat.
// All values in *pmi are reported in bytes.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    // Request exactly one global record (name=NULL, desired=1).
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    // Convert from 4 KB pages to bytes.
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
825
826//////////////////////////////////////////////////////////////////////////////
827// create new thread
828
829// Thread start routine for all newly created threads
// Entry point for all threads created via os::create_thread().
// Records the stack geometry in the Thread, applies a cache-line
// randomization trick via alloca(), publishes the pthread/kernel thread
// ids into the OSThread, sets up the signal mask and FPU state, and
// finally invokes the Thread's run() method.
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Both ids: the pthread id is the primary identifier, the kernel
  // thread id is kept for diagnostics only (see set_kernel_thread_id below).
  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  trcVerbose("newborn Thread : pthread-id %u, ktid " UINT64_FORMAT
    ", stack %p ... %p, stacksize 0x%IX (%IB)",
    pthread_id, kernel_thread_id,
    thread->stack_end(),
    thread->stack_base(),
    thread->stack_size(),
    thread->stack_size());

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    trcVerbose("Thread " UINT64_FORMAT ": stack not in data segment.", (uint64_t) pthread_id);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  // Note: 'counter' is deliberately unsynchronized; an approximate spread
  // across the 8 possible offsets is all that is needed here.
  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  trcVerbose("Thread finished : pthread-id %u, ktid " UINT64_FORMAT ".",
    pthread_id, kernel_thread_id);

  return 0;
}
901
// Create a new OS-level thread for the given (not yet published) Thread.
// stack_size == 0 means: derive the stack size from the thread type and
// the -Xss / CompilerThreadStackSize / VMThreadStackSize settings.
// On success the new thread is left in suspended state (woken up later
// by os::pd_start_thread) and true is returned; on failure the OSThread
// is deallocated again and false is returned.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (stack_size == 0) {
    stack_size = os::Aix::default_stack_size(thr_type);

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
      stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
  pthread_attr_setstacksize(&attr, stack_size);

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  // The attribute object is no longer needed once the thread exists.
  pthread_attr_destroy(&attr);

  if (ret == 0) {
    trcVerbose("Created New Thread : pthread-id %u", tid);
  } else {
    if (os::Aix::on_pase()) {
      // QIBM_MULTI_THREADED=Y is needed when the launcher is started on iSeries
      // using QSH. Otherwise pthread_create fails with errno=11.
      trcVerbose("(Please make sure you set the environment variable "
              "QIBM_MULTI_THREADED=Y before running this program.)");
    }
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}
990
991/////////////////////////////////////////////////////////////////////////////
992// attach existing thread
993
994// bootstrap the main thread
995bool os::create_main_thread(JavaThread* thread) {
996  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
997  return create_attached_thread(thread);
998}
999
// Attach an externally created (already running) thread to the VM:
// allocate and populate its OSThread, set up FPU state and the hotspot
// signal mask. Returns false only if the OSThread allocation fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // The thread already runs; query both its ids directly.
  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  trcVerbose("attaching Thread : pthread-id %u, ktid " UINT64_FORMAT ", stack %p ... %p, stacksize 0x%IX (%IB)",
    pthread_id, kernel_thread_id,
    thread->stack_end(),
    thread->stack_base(),
    thread->stack_size(),
    thread->stack_size());

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep kernel thread id too for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}
1052
1053void os::pd_start_thread(Thread* thread) {
1054  int status = pthread_continue_np(thread->osthread()->pthread_id());
1055  assert(status == 0, "thr_continue failed");
1056}
1057
1058// Free OS resources related to the OSThread
1059void os::free_thread(OSThread* osthread) {
1060  assert(osthread != NULL, "osthread not set");
1061
1062  if (Thread::current()->osthread() == osthread) {
1063    // Restore caller's signal mask
1064    sigset_t sigmask = osthread->caller_sigmask();
1065    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
1066   }
1067
1068  delete osthread;
1069}
1070
1071////////////////////////////////////////////////////////////////////////////////
1072// time support
1073
1074// Time since start-up in seconds to a fine granularity.
1075// Used by VMSelfDestructTimer and the MemProfiler.
1076double os::elapsedTime() {
1077  return (double)(os::elapsed_counter()) * 0.000001;
1078}
1079
1080jlong os::elapsed_counter() {
1081  timeval time;
1082  int status = gettimeofday(&time, NULL);
1083  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1084}
1085
1086jlong os::elapsed_frequency() {
1087  return (1000 * 1000);
1088}
1089
// Virtual (per-thread CPU) time is read via getrusage(RUSAGE_THREAD) in
// os::elapsedVTime(); it is always available and cannot be toggled.
bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
1093
1094double os::elapsedVTime() {
1095  struct rusage usage;
1096  int retval = getrusage(RUSAGE_THREAD, &usage);
1097  if (retval == 0) {
1098    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
1099  } else {
1100    // better than nothing, but not much
1101    return elapsedTime();
1102  }
1103}
1104
1105jlong os::javaTimeMillis() {
1106  timeval time;
1107  int status = gettimeofday(&time, NULL);
1108  assert(status != -1, "aix error at gettimeofday()");
1109  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1110}
1111
1112void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
1113  timeval time;
1114  int status = gettimeofday(&time, NULL);
1115  assert(status != -1, "aix error at gettimeofday()");
1116  seconds = jlong(time.tv_sec);
1117  nanos = jlong(time.tv_usec) * 1000;
1118}
1119
// Nanosecond timestamp for System.nanoTime():
// - on PASE: gettimeofday() (microsecond granularity, scaled to ns)
// - on AIX:  the processor's real time clock / time base registers.
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    // Build the microsecond count in 64 bit to avoid overflow before
    // scaling to nanoseconds.
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1148
// Describe the properties of the timer behind os::javaTimeNanos() for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}
1156
1157// Return the real, user, and system times in seconds from an
1158// arbitrary fixed point in the past.
1159bool os::getTimesSecs(double* process_real_time,
1160                      double* process_user_time,
1161                      double* process_system_time) {
1162  struct tms ticks;
1163  clock_t real_ticks = times(&ticks);
1164
1165  if (real_ticks == (clock_t) (-1)) {
1166    return false;
1167  } else {
1168    double ticks_per_second = (double) clock_tics_per_sec;
1169    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
1170    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
1171    *process_real_time = ((double) real_ticks) / ticks_per_second;
1172
1173    return true;
1174  }
1175}
1176
1177char * os::local_time_string(char *buf, size_t buflen) {
1178  struct tm t;
1179  time_t long_time;
1180  time(&long_time);
1181  localtime_r(&long_time, &t);
1182  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1183               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1184               t.tm_hour, t.tm_min, t.tm_sec);
1185  return buf;
1186}
1187
1188struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
1189  return localtime_r(clock, res);
1190}
1191
1192////////////////////////////////////////////////////////////////////////////////
1193// runtime exit support
1194
1195// Note: os::shutdown() might be called very early during initialization, or
1196// called from signal handler. Before adding something to os::shutdown(), make
1197// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {
  // Keep the steps below async-safe and tolerant of a partially
  // initialized VM (see the note above this function).

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1215
1216// Note: os::abort() might be called very early during initialization, or
1217// called from signal handler. Before adding something to os::abort(), make
1218// sure it is async-safe and can handle partially initialized VM.
// Terminate the VM after running the shutdown sequence.
// dump_core selects ::abort() (which may produce a core file) over a
// plain ::exit(1). siginfo/context are accepted but unused here.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}
1235
1236// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // Raise SIGABRT immediately - no shutdown sequence, no hooks
  // (os::abort() is the graceful variant).
  ::abort();
}
1240
1241// This method is a copy of JDK's sysGetLastErrorString
1242// from src/solaris/hpi/src/system_md.c
1243
1244size_t os::lasterror(char *buf, size_t len) {
1245  if (errno == 0) return 0;
1246
1247  const char *s = ::strerror(errno);
1248  size_t n = ::strlen(s);
1249  if (n >= len) {
1250    n = len - 1;
1251  }
1252  ::strncpy(buf, s, n);
1253  buf[n] = '\0';
1254  return n;
1255}
1256
intx os::current_thread_id() {
  // The reported thread id is the pthread id, consistent with
  // OSThread::thread_id (see create_thread/create_attached_thread).
  return (intx)pthread_self();
}
1260
// Process id of the current process (plain POSIX getpid()).
int os::current_process_id() {
  return getpid();
}
1264
1265// DLL functions
1266
// Shared libraries on AIX use the ".so" suffix (see os::dll_build_name).
const char* os::dll_file_extension() { return ".so"; }
1268
1269// This must be hard coded because it's the system's temporary
1270// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }  // deliberately fixed, see comment above
1272
1273static bool file_exists(const char* filename) {
1274  struct stat statbuf;
1275  if (filename == NULL || strlen(filename) == 0) {
1276    return false;
1277  }
1278  return os::stat(filename, &statbuf) == 0;
1279}
1280
// Build a platform library file name ("lib<fname>.so") into buffer,
// optionally prefixed by a directory. If pname is a path-separator list,
// the first directory containing the library wins. Returns true if a
// name was produced (for the list case: only if the file exists).
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    // No directory prefix requested.
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path list; probe each entry for the library.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    // Single directory prefix; existence is not checked here.
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1328
1329// Check if addr is inside libjvm.so.
1330bool os::address_is_in_vm(address addr) {
1331
1332  // Input could be a real pc or a function pointer literal. The latter
1333  // would be a function descriptor residing in the data segment of a module.
1334  loaded_module_t lm;
1335  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
1336    return lm.is_in_vm;
1337  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
1338    return lm.is_in_vm;
1339  } else {
1340    return false;
1341  }
1342
1343}
1344
1345// Resolve an AIX function descriptor literal to a code pointer.
1346// If the input is a valid code pointer to a text segment of a loaded module,
1347//   it is returned unchanged.
1348// If the input is a valid AIX function descriptor, it is resolved to the
1349//   code entry point.
1350// If the input is neither a valid function descriptor nor a valid code pointer,
1351//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
    // It is a real code pointer.
    return p;
  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
    // Pointer to data segment, potential function descriptor.
    // Dereference the descriptor's entry slot and verify the result
    // actually points into a text segment before trusting it.
    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
      // It is a function descriptor.
      return code_entry;
    }
  }

  // Neither a code pointer nor a resolvable function descriptor.
  return NULL;
}
1368
1369bool os::dll_address_to_function_name(address addr, char *buf,
1370                                      int buflen, int *offset,
1371                                      bool demangle) {
1372  if (offset) {
1373    *offset = -1;
1374  }
1375  // Buf is not optional, but offset is optional.
1376  assert(buf != NULL, "sanity check");
1377  buf[0] = '\0';
1378
1379  // Resolve function ptr literals first.
1380  addr = resolve_function_descriptor_to_code_pointer(addr);
1381  if (!addr) {
1382    return false;
1383  }
1384
1385  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
1386  return Decoder::decode(addr, buf, buflen, offset, demangle);
1387}
1388
1389static int getModuleName(codeptr_t pc,                    // [in] program counter
1390                         char* p_name, size_t namelen,    // [out] optional: function name
1391                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
1392                         ) {
1393
1394  if (p_name && namelen > 0) {
1395    *p_name = '\0';
1396  }
1397  if (p_errmsg && errmsglen > 0) {
1398    *p_errmsg = '\0';
1399  }
1400
1401  if (p_name && namelen > 0) {
1402    loaded_module_t lm;
1403    if (LoadedLibraries::find_for_text_address(pc, &lm) != NULL) {
1404      strncpy(p_name, lm.shortname, namelen);
1405      p_name[namelen - 1] = '\0';
1406    }
1407    return 0;
1408  }
1409
1410  return -1;
1411}
1412
1413bool os::dll_address_to_library_name(address addr, char* buf,
1414                                     int buflen, int* offset) {
1415  if (offset) {
1416    *offset = -1;
1417  }
1418  // Buf is not optional, but offset is optional.
1419  assert(buf != NULL, "sanity check");
1420  buf[0] = '\0';
1421
1422  // Resolve function ptr literals first.
1423  addr = resolve_function_descriptor_to_code_pointer(addr);
1424  if (!addr) {
1425    return false;
1426  }
1427
1428  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
1429    return true;
1430  }
1431  return false;
1432}
1433
1434// Loads .dll/.so and in case of error it checks if .dll/.so was built
1435// for the same architecture as Hotspot is running on.
1436void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1437
1438  if (ebuf && ebuflen > 0) {
1439    ebuf[0] = '\0';
1440    ebuf[ebuflen - 1] = '\0';
1441  }
1442
1443  if (!filename || strlen(filename) == 0) {
1444    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1445    return NULL;
1446  }
1447
1448  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1449  void * result= ::dlopen(filename, RTLD_LAZY);
1450  if (result != NULL) {
1451    // Reload dll cache. Don't do this in signal handling.
1452    LoadedLibraries::reload();
1453    return result;
1454  } else {
1455    // error analysis when dlopen fails
1456    const char* const error_report = ::dlerror();
1457    if (error_report && ebuf && ebuflen > 0) {
1458      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1459               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1460    }
1461  }
1462  return NULL;
1463}
1464
1465void* os::dll_lookup(void* handle, const char* name) {
1466  void* res = dlsym(handle, name);
1467  return res;
1468}
1469
void* os::get_default_process_handle() {
  // dlopen(NULL) yields a handle for the main program itself.
  return (void*)::dlopen(NULL, RTLD_LAZY);
}
1473
// Print the list of loaded libraries to st.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1478
1479void os::get_summary_os_info(char* buf, size_t buflen) {
1480  // There might be something more readable than uname results for AIX.
1481  struct utsname name;
1482  uname(&name);
1483  snprintf(buf, buflen, "%s %s", name.release, name.version);
1484}
1485
1486void os::print_os_info(outputStream* st) {
1487  st->print("OS:");
1488
1489  st->print("uname:");
1490  struct utsname name;
1491  uname(&name);
1492  st->print(name.sysname); st->print(" ");
1493  st->print(name.nodename); st->print(" ");
1494  st->print(name.release); st->print(" ");
1495  st->print(name.version); st->print(" ");
1496  st->print(name.machine);
1497  st->cr();
1498
1499  uint32_t ver = os::Aix::os_version();
1500  st->print_cr("AIX kernel version %u.%u.%u.%u",
1501               (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1502
1503  // rlimit
1504  st->print("rlimit:");
1505  struct rlimit rlim;
1506
1507  st->print(" STACK ");
1508  getrlimit(RLIMIT_STACK, &rlim);
1509  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1510  else st->print("%uk", rlim.rlim_cur >> 10);
1511
1512  st->print(", CORE ");
1513  getrlimit(RLIMIT_CORE, &rlim);
1514  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1515  else st->print("%uk", rlim.rlim_cur >> 10);
1516
1517  st->print(", NPROC ");
1518  st->print("%d", sysconf(_SC_CHILD_MAX));
1519
1520  st->print(", NOFILE ");
1521  getrlimit(RLIMIT_NOFILE, &rlim);
1522  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1523  else st->print("%d", rlim.rlim_cur);
1524
1525  st->print(", AS ");
1526  getrlimit(RLIMIT_AS, &rlim);
1527  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1528  else st->print("%uk", rlim.rlim_cur >> 10);
1529
1530  // Print limits on DATA, because it limits the C-heap.
1531  st->print(", DATA ");
1532  getrlimit(RLIMIT_DATA, &rlim);
1533  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1534  else st->print("%uk", rlim.rlim_cur >> 10);
1535  st->cr();
1536
1537  // load average
1538  st->print("load average:");
1539  double loadavg[3] = {-1.L, -1.L, -1.L};
1540  os::loadavg(loadavg, 3);
1541  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1542  st->cr();
1543
1544  // print wpar info
1545  libperfstat::wparinfo_t wi;
1546  if (libperfstat::get_wparinfo(&wi)) {
1547    st->print_cr("wpar info");
1548    st->print_cr("name: %s", wi.name);
1549    st->print_cr("id:   %d", wi.wpar_id);
1550    st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1551  }
1552
1553  // print partition info
1554  libperfstat::partitioninfo_t pi;
1555  if (libperfstat::get_partitioninfo(&pi)) {
1556    st->print_cr("partition info");
1557    st->print_cr(" name: %s", pi.name);
1558  }
1559
1560}
1561
1562void os::print_memory_info(outputStream* st) {
1563
1564  st->print_cr("Memory:");
1565
1566  st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1567    describe_pagesize(g_multipage_support.pagesize));
1568  st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1569    describe_pagesize(g_multipage_support.datapsize));
1570  st->print_cr("  Text page size:                         %s",
1571    describe_pagesize(g_multipage_support.textpsize));
1572  st->print_cr("  Thread stack page size (pthread):       %s",
1573    describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1574  st->print_cr("  Default shared memory page size:        %s",
1575    describe_pagesize(g_multipage_support.shmpsize));
1576  st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1577    (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1578  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1579    (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1580  st->print_cr("  Multipage error: %d",
1581    g_multipage_support.error);
1582  st->cr();
1583  st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1584  // not used in OpenJDK st->print_cr("  os::stack_page_size:    %s", describe_pagesize(os::stack_page_size()));
1585
1586  // print out LDR_CNTRL because it affects the default page sizes
1587  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1588  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1589
1590  // Print out EXTSHM because it is an unsupported setting.
1591  const char* const extshm = ::getenv("EXTSHM");
1592  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1593  if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1594    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1595  }
1596
1597  // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1598  const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1599  st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1600      aixthread_guardpages ? aixthread_guardpages : "<unset>");
1601
1602  os::Aix::meminfo_t mi;
1603  if (os::Aix::get_meminfo(&mi)) {
1604    char buffer[256];
1605    if (os::Aix::on_aix()) {
1606      st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1607      st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1608      st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1609      st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1610    } else {
1611      // PASE - Numbers are result of QWCRSSTS; they mean:
1612      // real_total: Sum of all system pools
1613      // real_free: always 0
1614      // pgsp_total: we take the size of the system ASP
1615      // pgsp_free: size of system ASP times percentage of system ASP unused
1616      st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1617      st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1618      st->print_cr("%% system asp used : " SIZE_FORMAT,
1619        mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1620    }
1621    st->print_raw(buffer);
1622  }
1623  st->cr();
1624
1625  // Print segments allocated with os::reserve_memory.
1626  st->print_cr("internal virtual memory regions used by vm:");
1627  vmembk_print_on(st);
1628}
1629
1630// Get a string for the cpuinfo that is a summary of the cpu type
1631void os::get_summary_cpu_info(char* buf, size_t buflen) {
1632  // This looks good
1633  libperfstat::cpuinfo_t ci;
1634  if (libperfstat::get_cpuinfo(&ci)) {
1635    strncpy(buf, ci.version, buflen);
1636  } else {
1637    strncpy(buf, "AIX", buflen);
1638  }
1639}
1640
// Print a one-line CPU summary (count and feature string) to st.
// buf/buflen are part of the shared signature but unused here.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash.
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::cpu_features());
  st->cr();
}
1649
1650static void print_signal_handler(outputStream* st, int sig,
1651                                 char* buf, size_t buflen);
1652
1653void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1654  st->print_cr("Signal Handlers:");
1655  print_signal_handler(st, SIGSEGV, buf, buflen);
1656  print_signal_handler(st, SIGBUS , buf, buflen);
1657  print_signal_handler(st, SIGFPE , buf, buflen);
1658  print_signal_handler(st, SIGPIPE, buf, buflen);
1659  print_signal_handler(st, SIGXFSZ, buf, buflen);
1660  print_signal_handler(st, SIGILL , buf, buflen);
1661  print_signal_handler(st, SR_signum, buf, buflen);
1662  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1663  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1664  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1665  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1666  print_signal_handler(st, SIGTRAP, buf, buflen);
1667  print_signal_handler(st, SIGDANGER, buf, buflen);
1668}
1669
// Cache for os::jvm_path() so the dladdr/realpath work is done only once.
static char saved_jvm_path[MAXPATHLEN] = {0};
1671
1672// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate the shared object containing this very function, then
  // canonicalize its path into buf.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = realpath((char *)dlinfo.dli_fname, buf);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
  // NOTE(review): in product builds the asserts vanish; if dladdr or
  // realpath fails, buf's content is unspecified - confirm callers cope.

  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
1695
// Intentionally empty: AIX native symbol names need no JNI name prefix.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}
1699
// Intentionally empty: AIX native symbol names need no JNI name suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1703
1704////////////////////////////////////////////////////////////////////////////////
1705// sun.misc.Signal support
1706
// Number of SIGINTs seen so far; used by UserHandler to pass on only the
// first one when Ctrl-C delivers SIGINT to every thread.
static volatile jint sigint_count = 0;
1708
// Native handler installed for signals managed via sun.misc.Signal;
// forwards the signal number to os::signal_notify().
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}
1725
// Return the address of the UserHandler above as an opaque pointer.
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
1729
// Plain-C function pointer types for the two sigaction handler flavors.
extern "C" {
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
1734
// Install 'handler' for signal_number via sigaction().
// Returns the previously installed handler, or (void *)-1 if the
// registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies. Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1766
// Deliver 'signal_number' to the current process via raise(3).
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
1770
1771//
1772// The following code is moved from os.cpp for making this
1773// code platform specific, which it is by its very nature.
1774//
1775
// Will be modified when max signal is changed to be dynamic.
// Pseudo-signal number used internally to request termination of the
// signal thread (one past the highest real signal number).
int os::sigexitnum_pd() {
  return NSIG;
}
1780
// a counter for each possible signal value; incremented by signal_notify(),
// claimed (CAS-decremented) by check_pending_signals()
static volatile jint pending_signals[NSIG+1] = { 0 };
1783
// Wrapper functions for: sem_init(), sem_post(), sem_wait()
// On AIX, we use sem_init(), sem_post(), sem_wait()
// On Pase, we need to use msem_lock() and msem_unlock(), because Posix Semaphores
// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
// on AIX, msem_..() calls are suspected of causing problems.
static sem_t sig_sem;               // AIX only: Posix semaphore backing signal_wait()
static msemaphore* p_sig_msem = 0;  // PASE only: memory semaphore (lives in shared memory)
1792
1793static void local_sem_init() {
1794  if (os::Aix::on_aix()) {
1795    int rc = ::sem_init(&sig_sem, 0, 0);
1796    guarantee(rc != -1, "sem_init failed");
1797  } else {
1798    // Memory semaphores must live in shared mem.
1799    guarantee0(p_sig_msem == NULL);
1800    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1801    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1802    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1803  }
1804}
1805
1806static void local_sem_post() {
1807  static bool warn_only_once = false;
1808  if (os::Aix::on_aix()) {
1809    int rc = ::sem_post(&sig_sem);
1810    if (rc == -1 && !warn_only_once) {
1811      trcVerbose("sem_post failed (errno = %d, %s)", errno, strerror(errno));
1812      warn_only_once = true;
1813    }
1814  } else {
1815    guarantee0(p_sig_msem != NULL);
1816    int rc = ::msem_unlock(p_sig_msem, 0);
1817    if (rc == -1 && !warn_only_once) {
1818      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, strerror(errno));
1819      warn_only_once = true;
1820    }
1821  }
1822}
1823
1824static void local_sem_wait() {
1825  static bool warn_only_once = false;
1826  if (os::Aix::on_aix()) {
1827    int rc = ::sem_wait(&sig_sem);
1828    if (rc == -1 && !warn_only_once) {
1829      trcVerbose("sem_wait failed (errno = %d, %s)", errno, strerror(errno));
1830      warn_only_once = true;
1831    }
1832  } else {
1833    guarantee0(p_sig_msem != NULL); // must init before use
1834    int rc = ::msem_lock(p_sig_msem, 0);
1835    if (rc == -1 && !warn_only_once) {
1836      trcVerbose("msem_lock failed (errno = %d, %s)", errno, strerror(errno));
1837      warn_only_once = true;
1838    }
1839  }
1840}
1841
// Platform-dependent part of signal support setup: clear the per-signal
// pending counters and create the signal semaphore.
void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  local_sem_init();
}
1849
// Record that 'sig' was raised and wake one waiter in signal_wait().
// Called from signal-handler context (UserHandler), so it must stay
// async-signal-minimal: one atomic increment plus a semaphore post.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  local_sem_post();
}
1854
// Scan pending_signals[] for a raised signal and claim it by decrementing
// its counter. If 'wait' is true, block on the signal semaphore between
// scans until signal_notify() posts; otherwise return -1 immediately when
// nothing is pending. Returns the claimed signal number, or -1.
static int check_pending_signals(bool wait) {
  // Reset the SIGINT throttle used by UserHandler().
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS-decrement so concurrent callers each claim a distinct pending count.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      local_sem_wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-post the token we consumed so it is not lost, then honor the
        // suspension before retrying the wait.
        local_sem_post();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1894
// Non-blocking probe for a pending signal; returns the signal number or -1.
int os::signal_lookup() {
  return check_pending_signals(false);
}
1898
// Block until a signal is pending; returns the claimed signal number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1902
1903////////////////////////////////////////////////////////////////////////////////
1904// Virtual Memory
1905
1906// We need to keep small simple bookkeeping for os::reserve_memory and friends.
1907
1908#define VMEM_MAPPED  1
1909#define VMEM_SHMATED 2
1910
// Bookkeeping record for one reservation made by os::reserve_memory and
// friends. Entries form a singly linked list headed by vmem.first.
struct vmembk_t {
  int type;         // 1 - mmap, 2 - shmat
  char* addr;       // start of reserved range
  size_t size;      // Real size, may be larger than usersize.
  size_t pagesize;  // page size of area
  vmembk_t* next;   // next entry in the global list

  // True if p lies within [addr, addr+size).
  bool contains_addr(char* p) const {
    return p >= addr && p < (addr + size);
  }

  // True if [p, p+s) lies entirely within this block.
  bool contains_range(char* p, size_t s) const {
    return contains_addr(p) && contains_addr(p + s - 1);
  }

  void print_on(outputStream* os) const {
    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
      " bytes, %d %s pages), %s",
      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
      (type == VMEM_SHMATED ? "shmat" : "mmap")
    );
  }

  // Check that range is a sub range of memory block (or equal to memory block);
  // also check that range is fully page aligned to the page size of the block.
  // guarantees (aborts) on violation.
  void assert_is_valid_subrange(char* p, size_t s) const {
    if (!contains_range(p, s)) {
      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
              p, p + s, addr, addr + size);
      guarantee0(false);
    }
    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
      guarantee0(false);
    }
  }
};
1950
// Global list of reservation bookkeeping entries, guarded by a critical section.
static struct {
  vmembk_t* first;         // head of the singly linked list (NULL if empty)
  MiscUtils::CritSect cs;  // guards all accesses to the list
} vmem;
1955
1956static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1957  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1958  assert0(p);
1959  if (p) {
1960    MiscUtils::AutoCritSect lck(&vmem.cs);
1961    p->addr = addr; p->size = size;
1962    p->pagesize = pagesize;
1963    p->type = type;
1964    p->next = vmem.first;
1965    vmem.first = p;
1966  }
1967}
1968
1969static vmembk_t* vmembk_find(char* addr) {
1970  MiscUtils::AutoCritSect lck(&vmem.cs);
1971  for (vmembk_t* p = vmem.first; p; p = p->next) {
1972    if (p->addr <= addr && (p->addr + p->size) > addr) {
1973      return p;
1974    }
1975  }
1976  return NULL;
1977}
1978
1979static void vmembk_remove(vmembk_t* p0) {
1980  MiscUtils::AutoCritSect lck(&vmem.cs);
1981  assert0(p0);
1982  assert0(vmem.first); // List should not be empty.
1983  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1984    if (*pp == p0) {
1985      *pp = p0->next;
1986      ::free(p0);
1987      return;
1988    }
1989  }
1990  assert0(false); // Not found?
1991}
1992
1993static void vmembk_print_on(outputStream* os) {
1994  MiscUtils::AutoCritSect lck(&vmem.cs);
1995  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1996    vmi->print_on(os);
1997    os->cr();
1998  }
1999}
2000
2001// Reserve and attach a section of System V memory.
2002// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
2003// address. Failing that, it will attach the memory anywhere.
2004// If <requested_addr> is NULL, function will attach the memory anywhere.
2005//
2006// <alignment_hint> is being ignored by this function. It is very probable however that the
2007// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
2008// Should this be not enogh, we can put more work into it.
2009static char* reserve_shmated_memory (
2010  size_t bytes,
2011  char* requested_addr,
2012  size_t alignment_hint) {
2013
2014  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
2015    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
2016    bytes, requested_addr, alignment_hint);
2017
2018  // Either give me wish address or wish alignment but not both.
2019  assert0(!(requested_addr != NULL && alignment_hint != 0));
2020
2021  // We must prevent anyone from attaching too close to the
2022  // BRK because that may cause malloc OOM.
2023  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2024    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2025      "Will attach anywhere.", requested_addr);
2026    // Act like the OS refused to attach there.
2027    requested_addr = NULL;
2028  }
2029
2030  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
2031  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
2032  if (os::Aix::on_pase_V5R4_or_older()) {
2033    ShouldNotReachHere();
2034  }
2035
2036  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
2037  const size_t size = align_size_up(bytes, SIZE_64K);
2038
2039  // Reserve the shared segment.
2040  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2041  if (shmid == -1) {
2042    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
2043    return NULL;
2044  }
2045
2046  // Important note:
2047  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2048  // We must right after attaching it remove it from the system. System V shm segments are global and
2049  // survive the process.
2050  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2051
2052  struct shmid_ds shmbuf;
2053  memset(&shmbuf, 0, sizeof(shmbuf));
2054  shmbuf.shm_pagesize = SIZE_64K;
2055  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2056    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2057               size / SIZE_64K, errno);
2058    // I want to know if this ever happens.
2059    assert(false, "failed to set page size for shmat");
2060  }
2061
2062  // Now attach the shared segment.
2063  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2064  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2065  // were not a segment boundary.
2066  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2067  const int errno_shmat = errno;
2068
2069  // (A) Right after shmat and before handing shmat errors delete the shm segment.
2070  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2071    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2072    assert(false, "failed to remove shared memory segment!");
2073  }
2074
2075  // Handle shmat error. If we failed to attach, just return.
2076  if (addr == (char*)-1) {
2077    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2078    return NULL;
2079  }
2080
2081  // Just for info: query the real page size. In case setting the page size did not
2082  // work (see above), the system may have given us something other then 4K (LDR_CNTRL).
2083  const size_t real_pagesize = os::Aix::query_pagesize(addr);
2084  if (real_pagesize != shmbuf.shm_pagesize) {
2085    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2086  }
2087
2088  if (addr) {
2089    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2090      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2091  } else {
2092    if (requested_addr != NULL) {
2093      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2094    } else {
2095      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2096    }
2097  }
2098
2099  // book-keeping
2100  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2101  assert0(is_aligned_to(addr, os::vm_page_size()));
2102
2103  return addr;
2104}
2105
2106static bool release_shmated_memory(char* addr, size_t size) {
2107
2108  trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2109    addr, addr + size - 1);
2110
2111  bool rc = false;
2112
2113  // TODO: is there a way to verify shm size without doing bookkeeping?
2114  if (::shmdt(addr) != 0) {
2115    trcVerbose("error (%d).", errno);
2116  } else {
2117    trcVerbose("ok.");
2118    rc = true;
2119  }
2120  return rc;
2121}
2122
2123static bool uncommit_shmated_memory(char* addr, size_t size) {
2124  trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2125    addr, addr + size - 1);
2126
2127  const bool rc = my_disclaim64(addr, size);
2128
2129  if (!rc) {
2130    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2131    return false;
2132  }
2133  return true;
2134}
2135
2136////////////////////////////////  mmap-based routines /////////////////////////////////
2137
2138// Reserve memory via mmap.
2139// If <requested_addr> is given, an attempt is made to attach at the given address.
2140// Failing that, memory is allocated at any address.
2141// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2142// allocate at an address aligned with the given alignment. Failing that, memory
2143// is aligned anywhere.
2144static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2145  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2146    "alignment_hint " UINTX_FORMAT "...",
2147    bytes, requested_addr, alignment_hint);
2148
2149  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2150  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size()) != 0) {
2151    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2152    return NULL;
2153  }
2154
2155  // We must prevent anyone from attaching too close to the
2156  // BRK because that may cause malloc OOM.
2157  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2158    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2159      "Will attach anywhere.", requested_addr);
2160    // Act like the OS refused to attach there.
2161    requested_addr = NULL;
2162  }
2163
2164  // Specify one or the other but not both.
2165  assert0(!(requested_addr != NULL && alignment_hint > 0));
2166
2167  // In 64K mode, we claim the global page size (os::vm_page_size())
2168  // is 64K. This is one of the few points where that illusion may
2169  // break, because mmap() will always return memory aligned to 4K. So
2170  // we must ensure we only ever return memory aligned to 64k.
2171  if (alignment_hint) {
2172    alignment_hint = lcm(alignment_hint, os::vm_page_size());
2173  } else {
2174    alignment_hint = os::vm_page_size();
2175  }
2176
2177  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2178  const size_t size = align_size_up(bytes, os::vm_page_size());
2179
2180  // alignment: Allocate memory large enough to include an aligned range of the right size and
2181  // cut off the leading and trailing waste pages.
2182  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2183  const size_t extra_size = size + alignment_hint;
2184
2185  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2186  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2187  int flags = MAP_ANONYMOUS | MAP_SHARED;
2188
2189  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2190  // it means if wishaddress is given but MAP_FIXED is not set.
2191  //
2192  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2193  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2194  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2195  // get clobbered.
2196  if (requested_addr != NULL) {
2197    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2198      flags |= MAP_FIXED;
2199    }
2200  }
2201
2202  char* addr = (char*)::mmap(requested_addr, extra_size,
2203      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2204
2205  if (addr == MAP_FAILED) {
2206    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2207    return NULL;
2208  }
2209
2210  // Handle alignment.
2211  char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2212  const size_t waste_pre = addr_aligned - addr;
2213  char* const addr_aligned_end = addr_aligned + size;
2214  const size_t waste_post = extra_size - waste_pre - size;
2215  if (waste_pre > 0) {
2216    ::munmap(addr, waste_pre);
2217  }
2218  if (waste_post > 0) {
2219    ::munmap(addr_aligned_end, waste_post);
2220  }
2221  addr = addr_aligned;
2222
2223  if (addr) {
2224    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2225      addr, addr + bytes, bytes);
2226  } else {
2227    if (requested_addr != NULL) {
2228      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2229    } else {
2230      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2231    }
2232  }
2233
2234  // bookkeeping
2235  vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2236
2237  // Test alignment, see above.
2238  assert0(is_aligned_to(addr, os::vm_page_size()));
2239
2240  return addr;
2241}
2242
2243static bool release_mmaped_memory(char* addr, size_t size) {
2244  assert0(is_aligned_to(addr, os::vm_page_size()));
2245  assert0(is_aligned_to(size, os::vm_page_size()));
2246
2247  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2248    addr, addr + size - 1);
2249  bool rc = false;
2250
2251  if (::munmap(addr, size) != 0) {
2252    trcVerbose("failed (%d)\n", errno);
2253    rc = false;
2254  } else {
2255    trcVerbose("ok.");
2256    rc = true;
2257  }
2258
2259  return rc;
2260}
2261
2262static bool uncommit_mmaped_memory(char* addr, size_t size) {
2263
2264  assert0(is_aligned_to(addr, os::vm_page_size()));
2265  assert0(is_aligned_to(size, os::vm_page_size()));
2266
2267  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2268    addr, addr + size - 1);
2269  bool rc = false;
2270
2271  // Uncommit mmap memory with msync MS_INVALIDATE.
2272  if (::msync(addr, size, MS_INVALIDATE) != 0) {
2273    trcVerbose("failed (%d)\n", errno);
2274    rc = false;
2275  } else {
2276    trcVerbose("ok.");
2277    rc = true;
2278  }
2279
2280  return rc;
2281}
2282
// Global VM page size; on AIX this may be 64K (see the 64K-mode comments
// in reserve_mmaped_memory). Valid only after os::init().
int os::vm_page_size() {
  // Seems redundant as all get out.
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2288
// Aix allocates memory by pages.
// Smallest unit of reservation; identical to the page size on AIX.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2294
#ifdef PRODUCT
// Emit a warning when commit_memory() fails. Compiled into PRODUCT builds
// only; reached via the PRODUCT_ONLY() macro in pd_commit_memory_or_exit().
static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
                                    int err) {
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
          strerror(err), err);
}
#endif
2303
// Commit [addr, addr+size) or terminate the VM with an out-of-memory
// error whose message is 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    // Add extra info in product mode for vm_exit_out_of_memory():
    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
2313
// Commit [addr, addr+size). AIX commits memory lazily on first touch, so
// this mostly validates the request against the bookkeeping; with
// -XX:+UseExplicitCommit every 4K page is touched to force backing now.
// 'exec' is not used here. Always returns true.
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {

  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  // The range must be fully contained in a known reservation.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);

  if (UseExplicitCommit) {
    // AIX commits memory on touch. So, touch all pages to be committed.
    for (char* p = addr; p < (addr + size); p += SIZE_4K) {
      *p = '\0';
    }
  }

  return true;
}
2338
// Variant taking an alignment hint; the hint is irrelevant on AIX.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
  return pd_commit_memory(addr, size, exec);
}
2342
// Variant taking an alignment hint; delegates to the hint-less version.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // Alignment_hint is ignored on this OS.
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
2349
// Uncommit [addr, addr+size): give back the physical pages while keeping
// the reservation. Dispatches to disclaim (shmat) or msync (mmap) based
// on how the containing range was reserved.
bool os::pd_uncommit_memory(char* addr, size_t size) {
  assert(is_aligned_to(addr, os::vm_page_size()),
    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    p2i(addr), os::vm_page_size());
  assert(is_aligned_to(size, os::vm_page_size()),
    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
    size, os::vm_page_size());

  // Dynamically do different things for mmap/shmat.
  const vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);
  vmi->assert_is_valid_subrange(addr, size);

  if (vmi->type == VMEM_SHMATED) {
    return uncommit_shmated_memory(addr, size);
  } else {
    return uncommit_mmaped_memory(addr, size);
  }
}
2369
// Not used on AIX; stack pages need no explicit commit here.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2375
// Not used on AIX; stack pages need no explicit commit here.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  // Do not call this; no need to commit stack pages on AIX.
  ShouldNotReachHere();
  return true;
}
2381
// No-op on AIX: realignment hints are not acted upon.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2384
// No-op on AIX: the free-memory hint is not acted upon.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}
2387
// NUMA stub: no NUMA support on AIX, nothing to do.
void os::numa_make_global(char *addr, size_t bytes) {
}
2390
// NUMA stub: no NUMA support on AIX, nothing to do.
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}
2393
// NUMA stub: topology never changes since NUMA is not supported here.
bool os::numa_topology_changed() {
  return false;
}
2397
// NUMA stub: report a single locality group.
size_t os::numa_get_groups_num() {
  return 1;
}
2401
// NUMA stub: all threads belong to the single group 0.
int os::numa_get_group_id() {
  return 0;
}
2405
2406size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2407  if (size > 0) {
2408    ids[0] = 0;
2409    return 1;
2410  }
2411  return 0;
2412}
2413
// Page-info queries are not supported on AIX; always reports failure.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
2417
// Page scanning is not supported on AIX; returns 'end' (nothing scanned).
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2421
2422// Reserves and attaches a shared memory segment.
2423// Will assert if a wish address is given and could not be obtained.
2424char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2425
2426  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2427  // thereby clobbering old mappings at that place. That is probably
2428  // not intended, never used and almost certainly an error were it
2429  // ever be used this way (to try attaching at a specified address
2430  // without clobbering old mappings an alternate API exists,
2431  // os::attempt_reserve_memory_at()).
2432  // Instead of mimicking the dangerous coding of the other platforms, here I
2433  // just ignore the request address (release) or assert(debug).
2434  assert0(requested_addr == NULL);
2435
2436  // Always round to os::vm_page_size(), which may be larger than 4K.
2437  bytes = align_size_up(bytes, os::vm_page_size());
2438  const size_t alignment_hint0 =
2439    alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2440
2441  // In 4K mode always use mmap.
2442  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2443  if (os::vm_page_size() == SIZE_4K) {
2444    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2445  } else {
2446    if (bytes >= Use64KPagesThreshold) {
2447      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2448    } else {
2449      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2450    }
2451  }
2452}
2453
// Release a reserved range. For shmat ranges only whole-range release is
// possible (partial ranges are disclaimed instead); mmap ranges may be
// released partially. Removes the bookkeeping entry on full release.
bool os::pd_release_memory(char* addr, size_t size) {

  // Dynamically do different things for mmap/shmat.
  vmembk_t* const vmi = vmembk_find(addr);
  guarantee0(vmi);

  // Always round to os::vm_page_size(), which may be larger than 4K.
  size = align_size_up(size, os::vm_page_size());
  addr = (char *)align_ptr_up(addr, os::vm_page_size());

  bool rc = false;
  bool remove_bookkeeping = false;
  if (vmi->type == VMEM_SHMATED) {
    // For shmatted memory, we do:
    // - If user wants to release the whole range, release the memory (shmdt).
    // - If user only wants to release a partial range, uncommit (disclaim) that
    //   range. That way, at least, we do not use memory anymore (but still page
    //   table space).
    vmi->assert_is_valid_subrange(addr, size);
    if (addr == vmi->addr && size == vmi->size) {
      rc = release_shmated_memory(addr, size);
      remove_bookkeeping = true;
    } else {
      rc = uncommit_shmated_memory(addr, size);
    }
  } else {
    // User may unmap partial regions but region has to be fully contained.
#ifdef ASSERT
    vmi->assert_is_valid_subrange(addr, size);
#endif
    rc = release_mmaped_memory(addr, size);
    remove_bookkeeping = true;
  }

  // update bookkeeping
  if (rc && remove_bookkeeping) {
    vmembk_remove(vmi);
  }

  return rc;
}
2495
2496static bool checked_mprotect(char* addr, size_t size, int prot) {
2497
2498  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2499  // not tell me if protection failed when trying to protect an un-protectable range.
2500  //
2501  // This means if the memory was allocated using shmget/shmat, protection wont work
2502  // but mprotect will still return 0:
2503  //
2504  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2505
2506  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2507
2508  if (!rc) {
2509    const char* const s_errno = strerror(errno);
2510    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2511    return false;
2512  }
2513
2514  // mprotect success check
2515  //
2516  // Mprotect said it changed the protection but can I believe it?
2517  //
2518  // To be sure I need to check the protection afterwards. Try to
2519  // read from protected memory and check whether that causes a segfault.
2520  //
2521  if (!os::Aix::xpg_sus_mode()) {
2522
2523    if (CanUseSafeFetch32()) {
2524
2525      const bool read_protected =
2526        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2527         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2528
2529      if (prot & PROT_READ) {
2530        rc = !read_protected;
2531      } else {
2532        rc = read_protected;
2533      }
2534
2535      if (!rc) {
2536        if (os::Aix::on_pase()) {
2537          // There is an issue on older PASE systems where mprotect() will return success but the
2538          // memory will not be protected.
2539          // This has nothing to do with the problem of using mproect() on SPEC1170 incompatible
2540          // machines; we only see it rarely, when using mprotect() to protect the guard page of
2541          // a stack. It is an OS error.
2542          //
2543          // A valid strategy is just to try again. This usually works. :-/
2544
2545          ::usleep(1000);
2546          if (::mprotect(addr, size, prot) == 0) {
2547            const bool read_protected_2 =
2548              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2549              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2550            rc = true;
2551          }
2552        }
2553      }
2554    }
2555  }
2556
2557  assert(rc == true, "mprotect failed.");
2558
2559  return rc;
2560}
2561
// Set protections specified
// Map the platform-independent ProtType to PROT_* bits and apply them via
// checked_mprotect(). Returns true if the protection took effect.
bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PROT_NONE; break;
  case MEM_PROT_READ: p = PROT_READ; break;
  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  default:
    ShouldNotReachHere();
  }
  // is_committed is unused.
  return checked_mprotect(addr, size, p);
}
2576
// Make [addr, addr+size) inaccessible (guard pages).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}
2580
// Restore full access to [addr, addr+size) (undo guard_memory()).
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2584
// Large page support

// Remains 0 on AIX: explicit large pages are not used here; 64K pages are
// handled through the normal reservation paths instead.
static size_t _large_page_size = 0;
2588
// Enable large page support if OS allows that.
// No-op on AIX; page sizes are handled by query_multipage_support and friends.
void os::large_page_init() {
  return; // Nothing to do. See query_multipage_support and friends.
}
2593
// Not used on AIX; large (64K) pages are served through os::reserve_memory().
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
  // so this is not needed.
  assert(false, "should not be called on AIX");
  return NULL;
}
2601
bool os::release_memory_special(char* base, size_t bytes) {
  // Should never be reached on AIX: reserve_memory_special() is unsupported
  // here (see above), so there is never special memory to release.
  Unimplemented();
  return false;
}
2607
// Returns the configured large page size; always 0 on AIX since
// _large_page_size is never set (no huge page support).
size_t os::large_page_size() {
  return _large_page_size;
}
2611
bool os::can_commit_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2616
bool os::can_execute_large_page_memory() {
  // Does not matter, we do not support huge pages.
  return false;
}
2621
2622// Reserve memory at an arbitrary address, only if that area is
2623// available (and not reserved for something else).
2624char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2625  char* addr = NULL;
2626
2627  // Always round to os::vm_page_size(), which may be larger than 4K.
2628  bytes = align_size_up(bytes, os::vm_page_size());
2629
2630  // In 4K mode always use mmap.
2631  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2632  if (os::vm_page_size() == SIZE_4K) {
2633    return reserve_mmaped_memory(bytes, requested_addr, 0);
2634  } else {
2635    if (bytes >= Use64KPagesThreshold) {
2636      return reserve_shmated_memory(bytes, requested_addr, 0);
2637    } else {
2638      return reserve_mmaped_memory(bytes, requested_addr, 0);
2639    }
2640  }
2641
2642  return addr;
2643}
2644
2645size_t os::read(int fd, void *buf, unsigned int nBytes) {
2646  return ::read(fd, buf, nBytes);
2647}
2648
2649size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2650  return ::pread(fd, buf, nBytes, offset);
2651}
2652
2653void os::naked_short_sleep(jlong ms) {
2654  struct timespec req;
2655
2656  assert(ms < 1000, "Un-interruptable sleep, short time use only");
2657  req.tv_sec = 0;
2658  if (ms > 0) {
2659    req.tv_nsec = (ms % 1000) * 1000000;
2660  }
2661  else {
2662    req.tv_nsec = 1;
2663  }
2664
2665  nanosleep(&req, NULL);
2666
2667  return;
2668}
2669
2670// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2671void os::infinite_sleep() {
2672  while (true) {    // sleep forever ...
2673    ::sleep(100);   // ... 100 seconds at a time
2674  }
2675}
2676
// Used to convert frequent JVM_Yield() to nops
// Governed entirely by the DontYieldALot flag.
bool os::dont_yield() {
  return DontYieldALot;
}
2681
// Give up the rest of this thread's time slice via the POSIX scheduler;
// no VM state is consulted or changed.
void os::naked_yield() {
  sched_yield();
}
2685
2686////////////////////////////////////////////////////////////////////////////////
2687// thread priority support
2688
2689// From AIX manpage to pthread_setschedparam
2690// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2691//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2692//
2693// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2694// range from 40 to 80, where 40 is the least favored priority and 80
2695// is the most favored."
2696//
2697// (Actually, I doubt this even has an impact on AIX, as we do kernel
2698// scheduling there; however, this still leaves iSeries.)
2699//
2700// We use the same values for AIX and PASE.
// Maps Java thread priority (array index, 1..10 plus CriticalPriority) to an
// AIX SCHED_OTHER priority in the 55..60 band (legal range 40..80, see above).
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2720
2721OSReturn os::set_native_priority(Thread* thread, int newpri) {
2722  if (!UseThreadPriorities) return OS_OK;
2723  pthread_t thr = thread->osthread()->pthread_id();
2724  int policy = SCHED_OTHER;
2725  struct sched_param param;
2726  param.sched_priority = newpri;
2727  int ret = pthread_setschedparam(thr, policy, &param);
2728
2729  if (ret != 0) {
2730    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2731        (int)thr, newpri, ret, strerror(ret));
2732  }
2733  return (ret == 0) ? OS_OK : OS_ERR;
2734}
2735
2736OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2737  if (!UseThreadPriorities) {
2738    *priority_ptr = java_to_os_priority[NormPriority];
2739    return OS_OK;
2740  }
2741  pthread_t thr = thread->osthread()->pthread_id();
2742  int policy = SCHED_OTHER;
2743  struct sched_param param;
2744  int ret = pthread_getschedparam(thr, &policy, &param);
2745  *priority_ptr = param.sched_priority;
2746
2747  return (ret == 0) ? OS_OK : OS_ERR;
2748}
2749
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
// Intentionally a no-op on AIX.
void os::hint_no_preempt() {}
2753
2754////////////////////////////////////////////////////////////////////////////////
2755// suspend/resume support
2756
2757//  the low-level signal-based suspend/resume support is a remnant from the
2758//  old VM-suspension that used to be for java-suspension, safepoints etc,
2759//  within hotspot. Now there is a single use-case for this:
2760//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2761//      that runs in the watcher thread.
2762//  The remaining code is greatly simplified from the more general suspension
2763//  code that used to be used.
2764//
2765//  The protocol is quite simple:
2766//  - suspend:
2767//      - sends a signal to the target thread
2768//      - polls the suspend state of the osthread using a yield loop
2769//      - target thread signal handler (SR_handler) sets suspend state
2770//        and blocks in sigsuspend until continued
2771//  - resume:
2772//      - sets target osthread state to continue
2773//      - sends signal to end the sigsuspend loop in the SR_handler
2774//
2775//  Note that the SR_lock plays no role in this suspend/resume protocol.
2776//
2777
// Drop the siginfo/ucontext pointers recorded at suspension time; called
// once the thread is resuming so stale context is never observed.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);
}
2782
// Record the signal's siginfo/ucontext on the OSThread so the suspended
// thread's register state can be inspected while it is parked in SR_handler.
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  osthread->set_ucontext(context);
  osthread->set_siginfo(siginfo);
}
2787
2788//
2789// Handler function invoked when a thread's execution is suspended or
2790// resumed. We have to be careful that only async-safe functions are
2791// called here (Note: most pthread functions are not async safe and
2792// should be avoided.)
2793//
// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex granting signal.
2799//
2800// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2801//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    // Record siginfo/ucontext so the suspended thread's state can be
    // inspected while it is parked below (see protocol comment above).
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // Guard against spurious wakeups: only exit once the state machine
        // has actually transitioned back to RUNNING.
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
2851
// Choose the suspend/resume signal (overridable via _JAVA_SR_SIGNUM) and
// install SR_handler for it. Returns 0 on success, -1 if sigaction() fails.
static int SR_initialize() {
  // NOTE(review): 'act' is not zeroed; fields other than sa_flags, sa_handler
  // and sa_mask keep stack garbage - presumably harmless on AIX, but worth
  // confirming.
  struct sigaction act;
  char *s;
  // Get signal number to use for suspend/resume
  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
    int sig = ::strtol(s, 0, 10);
    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
      SR_signum = sig;
    } else {
      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
    }
  }

  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");

  sigemptyset(&SR_sigset);
  sigaddset(&SR_sigset, SR_signum);

  // Set up signal handler for suspend/resume.
  act.sa_flags = SA_RESTART|SA_SIGINFO;
  // NOTE(review): SA_SIGINFO is set but the handler is stored through the
  // sa_handler member; this relies on sa_handler/sa_sigaction sharing
  // storage - confirm this is intended on AIX.
  act.sa_handler = (void (*)(int)) SR_handler;

  // SR_signum is blocked by default.
  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);

  if (sigaction(SR_signum, &act, 0) == -1) {
    return -1;
  }

  // Save signal flag
  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  return 0;
}
2888
// Counterpart of SR_initialize(); nothing to tear down. Always returns 0.
static int SR_finalize() {
  return 0;
}
2892
// Deliver SR_signum to the target thread to trigger SR_handler there.
// Returns the pthread_kill() status (0 on success).
static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}
2898
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// (used as loop bounds in do_suspend()/do_resume() below).
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
2904
// returns true on success and false on error - really an error is fatal
// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  // Spin/yield until the target's SR_handler marks the thread suspended,
  // yielding in batches of RANDOMLY_LARGE_INTEGER2 between checks.
  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::naked_yield();
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
2957
// Resume a thread previously stopped by do_suspend(): flip the state machine
// to WAKEUP_REQUEST, then keep signalling/yielding until the target's
// SR_handler reports RUNNING again.
static void do_resume(OSThread* osthread) {
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  // Re-send the resume signal if the target has not woken up yet (the
  // outer loop re-notifies after each full yield cycle).
  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::naked_yield();
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
2981
2982///////////////////////////////////////////////////////////////////////////////////
2983// signal handling (except suspend/resume)
2984
2985// This routine may be used by user applications as a "hook" to catch signals.
2986// The user-defined signal handler must pass unrecognized signals to this
2987// routine, and if it returns true (non-zero), then the signal handler must
2988// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
2991//
2992// If this routine returns false, it is OK to call it again. This allows
2993// the user-defined signal handler to perform checks either before or after
2994// the VM performs its own checks. Naturally, the user code would be making
2995// a serious error if it tried to handle an exception (such as a null check
2996// or breakpoint) that the VM was generating for its own correct operation.
2997//
2998// This routine may recognize any of the following kinds of signals:
2999//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
3000// It should be consulted by handlers for any of those signals.
3001//
3002// The caller of this routine must pass in the three arguments supplied
3003// to the function referred to in the "sa_sigaction" (not the "sa_handler")
3004// field of the structure passed to sigaction(). This routine assumes that
3005// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3006//
3007// Note that the VM will print warnings if it detects conflicting signal
3008// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3009//
3010extern "C" JNIEXPORT int
3011JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
3012
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
// Change the calling thread's signal mask via pthread_sigmask().
// 'how' is SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK; 'set' may be NULL to only
// query; 'oset' (optional) receives the previous mask.
// Returns true on success, false on error.
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  const int rc = ::pthread_sigmask(how, set, oset);
  // Return value semantics differ slightly for the error case:
  // pthread_sigmask returns the error number, sigthreadmask returns -1 and
  // sets the global errno (so pthread_sigmask is more threadsafe for error
  // handling). But success is always 0.
  return rc == 0;
}
3024
3025// Function to unblock all signals which are, according
3026// to POSIX, typical program error signals. If they happen while being blocked,
3027// they typically will bring down the process immediately.
3028bool unblock_program_error_signals() {
3029  sigset_t set;
3030  ::sigemptyset(&set);
3031  ::sigaddset(&set, SIGILL);
3032  ::sigaddset(&set, SIGBUS);
3033  ::sigaddset(&set, SIGFPE);
3034  ::sigaddset(&set, SIGSEGV);
3035  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3036}
3037
// Renamed from 'signalHandler' to avoid collision with other shared libs.
// Handler installed by set_signal_handler() for the error signals the VM
// manages itself; forwards to JVM_handle_aix_signal with
// abort_if_unrecognized == true.
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  int orig_errno = errno;  // Preserve errno value over signal handler.
  JVM_handle_aix_signal(sig, info, uc, true);
  errno = orig_errno;
}
3051
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining
// Handlers that were in place before the VM installed its own, saved so the
// chaining machinery below can invoke them.
struct sigaction sigact[NSIG];
// Set of signals for which sigact[] holds a valid saved handler.
sigset_t sigs;
bool os::Aix::libjsig_is_loaded = false;
// Resolved from libjsig ("JVM_get_signal_action") when that library is loaded.
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
3062
3063struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3064  struct sigaction *actp = NULL;
3065
3066  if (libjsig_is_loaded) {
3067    // Retrieve the old signal handler from libjsig
3068    actp = (*get_signal_action)(sig);
3069  }
3070  if (actp == NULL) {
3071    // Retrieve the preinstalled signal handler from jvm
3072    actp = get_preinstalled_handler(sig);
3073  }
3074
3075  return actp;
3076}
3077
// Invoke the saved user handler 'actp' for 'sig', honoring its SA_* flags
// and signal mask as sigaction() would. Returns true if the signal was
// handled (or deliberately ignored), false if the default action applied
// and the VM should treat the signal itself.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  // Call the old signal handler
  if (actp->sa_handler == SIG_DFL) {
    // It's more reasonable to let jvm treat it as an unexpected exception
    // instead of taking the default action.
    return false;
  } else if (actp->sa_handler != SIG_IGN) {
    if ((actp->sa_flags & SA_NODEFER) == 0) {
      // automatically block the signal
      sigaddset(&(actp->sa_mask), sig);
    }

    sa_handler_t hand = NULL;
    sa_sigaction_t sa = NULL;
    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
    // retrieve the chained handler
    if (siginfo_flag_set) {
      sa = actp->sa_sigaction;
    } else {
      hand = actp->sa_handler;
    }

    // SA_RESETHAND means the handler is one-shot: restore SIG_DFL before
    // calling it, as the kernel would have.
    if ((actp->sa_flags & SA_RESETHAND) != 0) {
      actp->sa_handler = SIG_DFL;
    }

    // try to honor the signal mask
    sigset_t oset;
    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);

    // call into the chained handler
    if (siginfo_flag_set) {
      (*sa)(sig, siginfo, context);
    } else {
      (*hand)(sig);
    }

    // restore the signal mask
    pthread_sigmask(SIG_SETMASK, &oset, 0);
  }
  // Tell jvm's signal handler the signal is taken care of.
  return true;
}
3122
3123bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3124  bool chained = false;
3125  // signal-chaining
3126  if (UseSignalChaining) {
3127    struct sigaction *actp = get_chained_signal_action(sig);
3128    if (actp != NULL) {
3129      chained = call_chained_handler(actp, sig, siginfo, context);
3130    }
3131  }
3132  return chained;
3133}
3134
3135struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3136  if (sigismember(&sigs, sig)) {
3137    return &sigact[sig];
3138  }
3139  return NULL;
3140}
3141
// Remember the pre-existing handler for 'sig' so chained_handler() can
// forward to it later; marks the slot valid by adding 'sig' to 'sigs'.
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigaddset(&sigs, sig);
}
3147
// for diagnostic
// Per-signal sa_flags the VM installed, recorded by set_signal_handler() /
// set_our_sigflags() so check_signal_handler() can detect later changes.
int sigflags[NSIG];
3150
// Return the sa_flags the VM recorded for 'sig' (0 if never recorded).
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  return sigflags[sig];
}
3155
// Record the sa_flags the VM used when installing its handler for 'sig'.
// The range check is repeated at runtime (not just asserted) so product
// builds never index out of bounds.
void os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  if (sig > 0 && sig < NSIG) {
    sigflags[sig] = flags;
  }
}
3162
// Install (set_installed == true) or reset to SIG_DFL (false) the VM's
// handler for 'sig'. Detects pre-existing third-party handlers and either
// leaves them alone (AllowUserSignalHandlers), saves them for chaining
// (UseSignalChaining), or aborts.
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal("Encountered unexpected pre-existing sigaction handler "
            "%#lx for signal %d.", (long)oldhand, sig);
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Sanity check: nobody raced us and swapped in a different handler
  // between the query above and the install.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3209
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
// Idempotent: does all work only on the first call.
void os::Aix::install_signal_handlers() {
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers.
      // NOTE(review): end_signal_setting/get_signal_action are not checked
      // for NULL; presumably any libjsig exporting JVM_begin_signal_setting
      // also exports the other two - confirm.
      (*begin_signal_setting)();
    }

    ::sigemptyset(&sigs);
    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);
    set_signal_handler(SIGDANGER, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers.
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // Need to initialize check_signal_done.
      ::sigemptyset(&check_signal_done);
    }
  }
}
3267
3268static const char* get_signal_handler_name(address handler,
3269                                           char* buf, int buflen) {
3270  int offset;
3271  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3272  if (found) {
3273    // skip directory names
3274    const char *p1, *p2;
3275    p1 = buf;
3276    size_t len = strlen(os::file_separator());
3277    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3278    // The way os::dll_address_to_library_name is implemented on Aix
3279    // right now, it always returns -1 for the offset which is not
3280    // terribly informative.
3281    // Will fix that. For now, omit the offset.
3282    jio_snprintf(buf, buflen, "%s", p1);
3283  } else {
3284    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3285  }
3286  return buf;
3287}
3288
// Print a one-line diagnostic for 'sig' to 'st': handler name, signal mask,
// sa_flags, and a warning if the VM's own handler flags were changed.
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // May be, handler was resetted by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3335
// Run the handler check for 'sig' unless a mismatch was already reported
// for it (check_signal_handler() adds flagged signals to check_signal_done).
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3339
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here
// No-op unless 'check_signals' is set (see install_signal_handlers()).
void os::run_periodic_checks() {

  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }
  DO_SIGNAL_CHECK(SIGDANGER);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
}
3373
// Signature of the C library's sigaction(); resolved via dlsym so the check
// below queries the default implementation even if sigaction has been
// interposed (e.g. by libjsig).
typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);

// Lazily resolved in os::Aix::check_signal_handler().
static os_sigaction_t os_sigaction = NULL;
3377
// Verify that the handler currently installed for 'sig' is still the one the
// VM expects; warn (once per signal) if a JNI application replaced it or
// changed its flags, and dump all handlers when a mismatch is found.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
    if (os_sigaction == NULL) return;
  }

  os_sigaction(sig, (struct sigaction*)NULL, &act);

  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);

  // Determine which handler the VM expects for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
    // Running under non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN
    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
                    exception_name(sig, buf, O_BUFLEN));
    }
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:");
    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
    tty->cr();
    tty->print("  found:");
    os::Posix::print_sa_flags(tty, act.sa_flags);
    tty->cr();
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3449
// To install functions for atexit system call
// atexit() requires a plain C function; this trampoline simply forwards
// to perfMemory_exit().
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3456
// This is called _before_ most of the global arguments have been parsed.
// Establishes basic platform facts (AIX vs. PASE, OS level, page sizes)
// and initializes the helper libraries; no argument-dependent policy yet.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (Shared memory boundary is supposed to be a 256M aligned.)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // Record process break at startup.
  g_brk_at_startup = (address) ::sbrk(0);
  assert(g_brk_at_startup != (address) -1, "sbrk failed");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc).
  os::Aix::scan_environment();

  // Probe multipage support.
  query_multipage_support();

  // Act like we only have one page size by eliminating corner cases which
  // we did not support very well anyway.
  // We have two input conditions:
  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
  //    setting.
  //    Data segment page size is important for us because it defines the thread stack page
  //    size, which is needed for guard page handling, stack banging etc.
  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
  //    and should be allocated with 64k pages.
  //
  // So, we do the following:
  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
  // 64k          no              --- AIX 5.2 ? ---
  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)

  // We explicitly leave no option to change page size, because only upgrading would work,
  // not downgrading (if stack page size is 64k you cannot pretend its 4k).

  if (g_multipage_support.datapsize == SIZE_4K) {
    // datapsize = 4K. Data segment, thread stacks are 4K paged.
    if (g_multipage_support.can_use_64K_pages) {
      // .. but we are able to use 64K pages dynamically.
      // This would be typical for java launchers which are not linked
      // with datapsize=64K (like, any other launcher but our own).
      //
      // In this case it would be smart to allocate the java heap with 64K
      // to get the performance benefit, and to fake 64k pages for the
      // data segment (when dealing with thread stacks).
      //
      // However, leave a possibility to downgrade to 4K, using
      // -XX:-Use64KPages.
      if (Use64KPages) {
        trcVerbose("64K page mode (faked for data segment)");
        Aix::_page_size = SIZE_64K;
      } else {
        trcVerbose("4K page mode (Use64KPages=off)");
        Aix::_page_size = SIZE_4K;
      }
    } else {
      // .. and not able to allocate 64k pages dynamically. Here, just
      // fall back to 4K paged mode and use mmap for everything.
      trcVerbose("4K page mode");
      Aix::_page_size = SIZE_4K;
      FLAG_SET_ERGO(bool, Use64KPages, false);
    }
  } else {
    // datapsize = 64k. Data segment, thread stacks are 64k paged.
    // This normally means that we can allocate 64k pages dynamically.
    // (There is one special case where this may be false: EXTSHM=on.
    // but we decided to not support that mode).
    assert0(g_multipage_support.can_use_64K_pages);
    Aix::_page_size = SIZE_64K;
    trcVerbose("64K page mode");
    FLAG_SET_ERGO(bool, Use64KPages, true);
  }

  // Short-wire stack page size to base page size; if that works, we just remove
  // that stack page size altogether.
  Aix::_stack_page_size = Aix::_page_size;

  // For now UseLargePages is just ignored.
  FLAG_SET_ERGO(bool, UseLargePages, false);
  _page_sizes[0] = 0;

  // debug trace
  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();
}
3574
3575// This is called _after_ the global arguments have been parsed.
3576jint os::init_2(void) {
3577
3578  if (os::Aix::on_pase()) {
3579    trcVerbose("Running on PASE.");
3580  } else {
3581    trcVerbose("Running on AIX (not PASE).");
3582  }
3583
3584  trcVerbose("processor count: %d", os::_processor_count);
3585  trcVerbose("physical memory: %lu", Aix::_physical_memory);
3586
3587  // Initially build up the loaded dll map.
3588  LoadedLibraries::reload();
3589  if (Verbose) {
3590    trcVerbose("Loaded Libraries: ");
3591    LoadedLibraries::print(tty);
3592  }
3593
3594  const int page_size = Aix::page_size();
3595  const int map_size = page_size;
3596
3597  address map_address = (address) MAP_FAILED;
3598  const int prot  = PROT_READ;
3599  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3600
3601  // Use optimized addresses for the polling page,
3602  // e.g. map it to a special 32-bit address.
3603  if (OptimizePollingPageLocation) {
3604    // architecture-specific list of address wishes:
3605    address address_wishes[] = {
3606      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3607      // PPC64: all address wishes are non-negative 32 bit values where
3608      // the lower 16 bits are all zero. we can load these addresses
3609      // with a single ppc_lis instruction.
3610      (address) 0x30000000, (address) 0x31000000,
3611      (address) 0x32000000, (address) 0x33000000,
3612      (address) 0x40000000, (address) 0x41000000,
3613      (address) 0x42000000, (address) 0x43000000,
3614      (address) 0x50000000, (address) 0x51000000,
3615      (address) 0x52000000, (address) 0x53000000,
3616      (address) 0x60000000, (address) 0x61000000,
3617      (address) 0x62000000, (address) 0x63000000
3618    };
3619    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3620
3621    // iterate over the list of address wishes:
3622    for (int i=0; i<address_wishes_length; i++) {
3623      // Try to map with current address wish.
3624      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3625      // fail if the address is already mapped.
3626      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3627                                     map_size, prot,
3628                                     flags | MAP_FIXED,
3629                                     -1, 0);
3630      trcVerbose("SafePoint Polling  Page address: %p (wish) => %p",
3631                   address_wishes[i], map_address + (ssize_t)page_size);
3632
3633      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3634        // Map succeeded and map_address is at wished address, exit loop.
3635        break;
3636      }
3637
3638      if (map_address != (address) MAP_FAILED) {
3639        // Map succeeded, but polling_page is not at wished address, unmap and continue.
3640        ::munmap(map_address, map_size);
3641        map_address = (address) MAP_FAILED;
3642      }
3643      // Map failed, continue loop.
3644    }
3645  } // end OptimizePollingPageLocation
3646
3647  if (map_address == (address) MAP_FAILED) {
3648    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3649  }
3650  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3651  os::set_polling_page(map_address);
3652
3653  if (!UseMembar) {
3654    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3655    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3656    os::set_memory_serialize_page(mem_serialize_page);
3657
3658    trcVerbose("Memory Serialize  Page address: %p - %p, size %IX (%IB)",
3659        mem_serialize_page, mem_serialize_page + Aix::page_size(),
3660        Aix::page_size(), Aix::page_size());
3661  }
3662
3663  // initialize suspend/resume support - must do this before signal_sets_init()
3664  if (SR_initialize() != 0) {
3665    perror("SR_initialize failed");
3666    return JNI_ERR;
3667  }
3668
3669  Aix::signal_sets_init();
3670  Aix::install_signal_handlers();
3671
3672  // Check minimum allowable stack size for thread creation and to initialize
3673  // the java system classes, including StackOverflowError - depends on page
3674  // size. Add a page for compiler2 recursion in main thread.
3675  // Add in 2*BytesPerWord times page size to account for VM stack during
3676  // class initialization depending on 32 or 64 bit VM.
3677  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3678                                    JavaThread::stack_guard_zone_size() +
3679                                    JavaThread::stack_shadow_zone_size() +
3680                                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3681
3682  os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3683
3684  size_t threadStackSizeInBytes = ThreadStackSize * K;
3685  if (threadStackSizeInBytes != 0 &&
3686      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3687    tty->print_cr("\nThe stack size specified is too small, "
3688                  "Specify at least %dk",
3689                  os::Aix::min_stack_allowed / K);
3690    return JNI_ERR;
3691  }
3692
3693  // Make the stack size a multiple of the page size so that
3694  // the yellow/red zones can be guarded.
3695  // Note that this can be 0, if no default stacksize was set.
3696  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3697
3698  if (UseNUMA) {
3699    UseNUMA = false;
3700    warning("NUMA optimizations are not available on this OS.");
3701  }
3702
3703  if (MaxFDLimit) {
3704    // Set the number of file descriptors to max. print out error
3705    // if getrlimit/setrlimit fails but continue regardless.
3706    struct rlimit nbr_files;
3707    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3708    if (status != 0) {
3709      if (PrintMiscellaneous && (Verbose || WizardMode))
3710        perror("os::init_2 getrlimit failed");
3711    } else {
3712      nbr_files.rlim_cur = nbr_files.rlim_max;
3713      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3714      if (status != 0) {
3715        if (PrintMiscellaneous && (Verbose || WizardMode))
3716          perror("os::init_2 setrlimit failed");
3717      }
3718    }
3719  }
3720
3721  if (PerfAllowAtExitRegistration) {
3722    // Only register atexit functions if PerfAllowAtExitRegistration is set.
3723    // At exit functions can be delayed until process exit time, which
3724    // can be problematic for embedded VM situations. Embedded VMs should
3725    // call DestroyJavaVM() to assure that VM resources are released.
3726
3727    // Note: perfMemory_exit_helper atexit function may be removed in
3728    // the future if the appropriate cleanup code can be added to the
3729    // VM_Exit VMOperation's doit method.
3730    if (atexit(perfMemory_exit_helper) != 0) {
3731      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3732    }
3733  }
3734
3735  return JNI_OK;
3736}
3737
// Mark the polling page as unreadable, so that threads reading it at a
// safepoint poll take a protection fault (the safepoint trap mechanism).
void os::make_polling_page_unreadable(void) {
  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
    fatal("Could not disable polling page");
  }
};
3744
// Mark the polling page as readable again, ending the safepoint:
// polls stop trapping once PROT_READ is restored.
void os::make_polling_page_readable(void) {
  // Changed according to os_linux.cpp.
  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
  }
};
3752
3753int os::active_processor_count() {
3754  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3755  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3756  return online_cpus;
3757}
3758
3759void os::set_native_thread_name(const char *name) {
3760  // Not yet implemented.
3761  return;
3762}
3763
3764bool os::distribute_processes(uint length, uint* distribution) {
3765  // Not yet implemented.
3766  return false;
3767}
3768
3769bool os::bind_to_processor(uint processor_id) {
3770  // Not yet implemented.
3771  return false;
3772}
3773
// Suspend the target thread, run do_task() on its captured context, then
// resume it. If the suspend fails, the task is skipped entirely.
void os::SuspendedThreadTask::internal_do_task() {
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}
3781
// SuspendedThreadTask which captures the program counter of a suspended
// thread. Used by os::get_thread_pc() below.
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // The captured pc; only valid once the task has completed (is_done()).
  ExtendedPC result();
protected:
  // Callback invoked while the target thread is suspended.
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc;  // pc extracted from the suspended thread's ucontext
};
3791
// Return the pc recorded by do_task(). Must not be called before the
// suspend/capture/resume cycle has finished.
ExtendedPC PcFetcher::result() {
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}
3796
// Runs while the target thread is suspended: extract the pc from the
// thread's saved ucontext. A missing context is tolerated only for the
// VMThread; for any other thread it indicates a bug.
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  if (osthread->ucontext() != NULL) {
    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread.
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}
3807
// Suspends the target using the signal mechanism and then grabs the PC before
// resuming the target. Used by the flat-profiler only.
// Only legal for the WatcherThread to query the VMThread's pc.
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher for the VMThread.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}
3819
3820////////////////////////////////////////////////////////////////////////////////
3821// debug support
3822
3823bool os::find(address addr, outputStream* st) {
3824
3825  st->print(PTR_FORMAT ": ", addr);
3826
3827  loaded_module_t lm;
3828  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3829      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3830    st->print("%s", lm.path);
3831    return true;
3832  }
3833
3834  return false;
3835}
3836
3837////////////////////////////////////////////////////////////////////////////////
3838// misc
3839
// This does not do anything on Aix. This is basically a hook for being
// able to use structured exception handling (thread-local exception filters)
// on, e.g., Win32. Here it simply forwards to the java call function.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
                         JavaCallArguments* args, Thread* thread) {
  f(value, method, args, thread);
}
3848
// No platform-specific statistics to print on AIX.
void os::print_statistics() {
}
3851
3852bool os::message_box(const char* title, const char* message) {
3853  int i;
3854  fdStream err(defaultStream::error_fd());
3855  for (i = 0; i < 78; i++) err.print_raw("=");
3856  err.cr();
3857  err.print_raw_cr(title);
3858  for (i = 0; i < 78; i++) err.print_raw("-");
3859  err.cr();
3860  err.print_raw_cr(message);
3861  for (i = 0; i < 78; i++) err.print_raw("=");
3862  err.cr();
3863
3864  char buf[16];
3865  // Prevent process from exiting upon "read error" without consuming all CPU
3866  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3867
3868  return buf[0] == 'y' || buf[0] == 'Y';
3869}
3870
3871int os::stat(const char *path, struct stat *sbuf) {
3872  char pathbuf[MAX_PATH];
3873  if (strlen(path) > MAX_PATH - 1) {
3874    errno = ENAMETOOLONG;
3875    return -1;
3876  }
3877  os::native_path(strcpy(pathbuf, path));
3878  return ::stat(pathbuf, sbuf);
3879}
3880
// C-heap verification hook; not implemented on AIX, so this
// unconditionally reports success.
bool os::check_heap(bool force) {
  return true;
}
3884
3885// Is a (classpath) directory empty?
3886bool os::dir_is_empty(const char* path) {
3887  DIR *dir = NULL;
3888  struct dirent *ptr;
3889
3890  dir = opendir(path);
3891  if (dir == NULL) return true;
3892
3893  /* Scan the directory */
3894  bool result = true;
3895  char buf[sizeof(struct dirent) + MAX_PATH];
3896  while (result && (ptr = ::readdir(dir)) != NULL) {
3897    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3898      result = false;
3899    }
3900  }
3901  closedir(dir);
3902  return result;
3903}
3904
3905// This code originates from JDK's sysOpen and open64_w
3906// from src/solaris/hpi/src/system_md.c
3907
3908int os::open(const char *path, int oflag, int mode) {
3909
3910  if (strlen(path) > MAX_PATH - 1) {
3911    errno = ENAMETOOLONG;
3912    return -1;
3913  }
3914  int fd;
3915
3916  fd = ::open64(path, oflag, mode);
3917  if (fd == -1) return -1;
3918
3919  // If the open succeeded, the file might still be a directory.
3920  {
3921    struct stat64 buf64;
3922    int ret = ::fstat64(fd, &buf64);
3923    int st_mode = buf64.st_mode;
3924
3925    if (ret != -1) {
3926      if ((st_mode & S_IFMT) == S_IFDIR) {
3927        errno = EISDIR;
3928        ::close(fd);
3929        return -1;
3930      }
3931    } else {
3932      ::close(fd);
3933      return -1;
3934    }
3935  }
3936
3937  // All file descriptors that are opened in the JVM and not
3938  // specifically destined for a subprocess should have the
3939  // close-on-exec flag set. If we don't set it, then careless 3rd
3940  // party native code might fork and exec without closing all
3941  // appropriate file descriptors (e.g. as we do in closeDescriptors in
3942  // UNIXProcess.c), and this in turn might:
3943  //
3944  // - cause end-of-file to fail to be detected on some file
3945  //   descriptors, resulting in mysterious hangs, or
3946  //
3947  // - might cause an fopen in the subprocess to fail on a system
3948  //   suffering from bug 1085341.
3949  //
3950  // (Yes, the default setting of the close-on-exec flag is a Unix
3951  // design flaw.)
3952  //
3953  // See:
3954  // 1085341: 32-bit stdio routines should support file descriptors >255
3955  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3956  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3957#ifdef FD_CLOEXEC
3958  {
3959    int flags = ::fcntl(fd, F_GETFD);
3960    if (flags != -1)
3961      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3962  }
3963#endif
3964
3965  return fd;
3966}
3967
3968// create binary file, rewriting existing file if required
3969int os::create_binary_file(const char* path, bool rewrite_existing) {
3970  int oflags = O_WRONLY | O_CREAT;
3971  if (!rewrite_existing) {
3972    oflags |= O_EXCL;
3973  }
3974  return ::open64(path, oflags, S_IREAD | S_IWRITE);
3975}
3976
// return current position of file pointer
// (uses the 64-bit lseek so offsets beyond 2G are handled correctly)
jlong os::current_file_offset(int fd) {
  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
}
3981
// move file pointer to the specified offset
// Returns the resulting absolute offset, or -1 on error.
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
}
3986
// This code originates from JDK's sysAvailable
// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
//
// Estimate the number of bytes available for reading on fd without
// blocking. Returns 1 on success (result in *bytes), 0 on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  int mode;
  struct stat64 buf64;

  // For character devices, pipes and sockets, ask the driver directly.
  if (::fstat64(fd, &buf64) >= 0) {
    mode = buf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
      int n;
      if (::ioctl(fd, FIONREAD, &n) >= 0) {
        *bytes = n;
        return 1;
      }
    }
  }
  // Otherwise compute (end - current) via seeks, restoring the original
  // file position afterwards. Any seek failure aborts with 0.
  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
    return 0;
  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
    return 0;
  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
    return 0;
  }
  *bytes = end - cur;
  return 1;
}
4015
4016// Map a block of memory.
4017char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4018                        char *addr, size_t bytes, bool read_only,
4019                        bool allow_exec) {
4020  int prot;
4021  int flags = MAP_PRIVATE;
4022
4023  if (read_only) {
4024    prot = PROT_READ;
4025    flags = MAP_SHARED;
4026  } else {
4027    prot = PROT_READ | PROT_WRITE;
4028    flags = MAP_PRIVATE;
4029  }
4030
4031  if (allow_exec) {
4032    prot |= PROT_EXEC;
4033  }
4034
4035  if (addr != NULL) {
4036    flags |= MAP_FIXED;
4037  }
4038
4039  // Allow anonymous mappings if 'fd' is -1.
4040  if (fd == -1) {
4041    flags |= MAP_ANONYMOUS;
4042  }
4043
4044  char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
4045                                     fd, file_offset);
4046  if (mapped_address == MAP_FAILED) {
4047    return NULL;
4048  }
4049  return mapped_address;
4050}
4051
// Remap a block of memory.
// On AIX this is identical to a fresh mapping, so simply delegate.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // same as map_memory() on this OS
  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
                        allow_exec);
}
4060
4061// Unmap a block of memory.
4062bool os::pd_unmap_memory(char* addr, size_t bytes) {
4063  return munmap(addr, bytes) == 0;
4064}
4065
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// CPU time (user+sys) of the calling thread, in nanoseconds.
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}
4079
// CPU time (user+sys) of the given thread, in nanoseconds.
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns
  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
  assert(n >= 0, "negative CPU time");
  return n;
}
4086
// CPU time of the calling thread; user+sys or user-only depending on
// user_sys_cpu_time. In nanoseconds.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  assert(n >= 0, "negative CPU time");
  return n;
}
4092
4093static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4094  bool error = false;
4095
4096  jlong sys_time = 0;
4097  jlong user_time = 0;
4098
4099  // Reimplemented using getthrds64().
4100  //
4101  // Works like this:
4102  // For the thread in question, get the kernel thread id. Then get the
4103  // kernel thread statistics using that id.
4104  //
4105  // This only works of course when no pthread scheduling is used,
4106  // i.e. there is a 1:1 relationship to kernel threads.
4107  // On AIX, see AIXTHREAD_SCOPE variable.
4108
4109  pthread_t pthtid = thread->osthread()->pthread_id();
4110
4111  // retrieve kernel thread id for the pthread:
4112  tid64_t tid = 0;
4113  struct __pthrdsinfo pinfo;
4114  // I just love those otherworldly IBM APIs which force me to hand down
4115  // dummy buffers for stuff I dont care for...
4116  char dummy[1];
4117  int dummy_size = sizeof(dummy);
4118  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4119                          dummy, &dummy_size) == 0) {
4120    tid = pinfo.__pi_tid;
4121  } else {
4122    tty->print_cr("pthread_getthrds_np failed.");
4123    error = true;
4124  }
4125
4126  // retrieve kernel timing info for that kernel thread
4127  if (!error) {
4128    struct thrdentry64 thrdentry;
4129    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4130      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4131      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4132    } else {
4133      tty->print_cr("pthread_getthrds_np failed.");
4134      error = true;
4135    }
4136  }
4137
4138  if (p_sys_time) {
4139    *p_sys_time = sys_time;
4140  }
4141
4142  if (p_user_time) {
4143    *p_user_time = user_time;
4144  }
4145
4146  if (error) {
4147    return false;
4148  }
4149
4150  return true;
4151}
4152
4153jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4154  jlong sys_time;
4155  jlong user_time;
4156
4157  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4158    return -1;
4159  }
4160
4161  return user_sys_cpu_time ? sys_time + user_time : user_time;
4162}
4163
// Describe the timer backing current_thread_cpu_time() for JVMTI.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4170
// Describe the timer backing thread_cpu_time() for JVMTI.
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4177
// Per-thread CPU time is available on this platform (see
// thread_cpu_time_unchecked() above).
bool os::is_thread_cpu_time_supported() {
  return true;
}
4181
4182// System loadavg support. Returns -1 if load average cannot be obtained.
4183// For now just return the system wide load average (no processor sets).
4184int os::loadavg(double values[], int nelem) {
4185
4186  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4187  guarantee(values, "argument error");
4188
4189  if (os::Aix::on_pase()) {
4190
4191    // AS/400 PASE: use libo4 porting library
4192    double v[3] = { 0.0, 0.0, 0.0 };
4193
4194    if (libo4::get_load_avg(v, v + 1, v + 2)) {
4195      for (int i = 0; i < nelem; i ++) {
4196        values[i] = v[i];
4197      }
4198      return nelem;
4199    } else {
4200      return -1;
4201    }
4202
4203  } else {
4204
4205    // AIX: use libperfstat
4206    libperfstat::cpuinfo_t ci;
4207    if (libperfstat::get_cpuinfo(&ci)) {
4208      for (int i = 0; i < nelem; i++) {
4209        values[i] = ci.loadavg[i];
4210      }
4211    } else {
4212      return -1;
4213    }
4214    return nelem;
4215  }
4216}
4217
4218void os::pause() {
4219  char filename[MAX_PATH];
4220  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4221    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4222  } else {
4223    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4224  }
4225
4226  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4227  if (fd != -1) {
4228    struct stat buf;
4229    ::close(fd);
4230    while (::stat(filename, &buf) == 0) {
4231      (void)::poll(NULL, 0, 100);
4232    }
4233  } else {
4234    trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4235  }
4236}
4237
4238bool os::Aix::is_primordial_thread() {
4239  if (pthread_self() == (pthread_t)1) {
4240    return true;
4241  } else {
4242    return false;
4243  }
4244}
4245
4246// OS recognitions (PASE/AIX, OS level) call this before calling any
4247// one of Aix::on_pase(), Aix::os_version() static
4248void os::Aix::initialize_os_info() {
4249
4250  assert(_on_pase == -1 && _os_version == 0, "already called.");
4251
4252  struct utsname uts;
4253  memset(&uts, 0, sizeof(uts));
4254  strcpy(uts.sysname, "?");
4255  if (::uname(&uts) == -1) {
4256    trcVerbose("uname failed (%d)", errno);
4257    guarantee(0, "Could not determine whether we run on AIX or PASE");
4258  } else {
4259    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4260               "node \"%s\" machine \"%s\"\n",
4261               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4262    const int major = atoi(uts.version);
4263    assert(major > 0, "invalid OS version");
4264    const int minor = atoi(uts.release);
4265    assert(minor > 0, "invalid OS release");
4266    _os_version = (major << 24) | (minor << 16);
4267    char ver_str[20] = {0};
4268    char *name_str = "unknown OS";
4269    if (strcmp(uts.sysname, "OS400") == 0) {
4270      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4271      _on_pase = 1;
4272      if (os_version_short() < 0x0504) {
4273        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4274        assert(false, "OS/400 release too old.");
4275      }
4276      name_str = "OS/400 (pase)";
4277      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4278    } else if (strcmp(uts.sysname, "AIX") == 0) {
4279      // We run on AIX. We do not support versions older than AIX 5.3.
4280      _on_pase = 0;
4281      // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4282      odmWrapper::determine_os_kernel_version(&_os_version);
4283      if (os_version_short() < 0x0503) {
4284        trcVerbose("AIX release older than AIX 5.3 not supported.");
4285        assert(false, "AIX release too old.");
4286      }
4287      name_str = "AIX";
4288      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4289                   major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4290    } else {
4291      assert(false, name_str);
4292    }
4293    trcVerbose("We run on %s %s", name_str, ver_str);
4294  }
4295
4296  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4297} // end: os::Aix::initialize_os_info()
4298
4299// Scan environment for important settings which might effect the VM.
4300// Trace out settings. Warn about invalid settings and/or correct them.
4301//
4302// Must run after os::Aix::initialue_os_info().
4303void os::Aix::scan_environment() {
4304
4305  char* p;
4306  int rc;
4307
4308  // Warn explicity if EXTSHM=ON is used. That switch changes how
4309  // System V shared memory behaves. One effect is that page size of
4310  // shared memory cannot be change dynamically, effectivly preventing
4311  // large pages from working.
4312  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4313  // recommendation is (in OSS notes) to switch it off.
4314  p = ::getenv("EXTSHM");
4315  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4316  if (p && strcasecmp(p, "ON") == 0) {
4317    _extshm = 1;
4318    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4319    if (!AllowExtshm) {
4320      // We allow under certain conditions the user to continue. However, we want this
4321      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4322      // that the VM is not able to allocate 64k pages for the heap.
4323      // We do not want to run with reduced performance.
4324      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4325    }
4326  } else {
4327    _extshm = 0;
4328  }
4329
4330  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4331  // Not tested, not supported.
4332  //
4333  // Note that it might be worth the trouble to test and to require it, if only to
4334  // get useful return codes for mprotect.
4335  //
4336  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4337  // exec() ? before loading the libjvm ? ....)
4338  p = ::getenv("XPG_SUS_ENV");
4339  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4340  if (p && strcmp(p, "ON") == 0) {
4341    _xpg_sus_mode = 1;
4342    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4343    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4344    // clobber address ranges. If we ever want to support that, we have to do some
4345    // testing first.
4346    guarantee(false, "XPG_SUS_ENV=ON not supported");
4347  } else {
4348    _xpg_sus_mode = 0;
4349  }
4350
4351  if (os::Aix::on_pase()) {
4352    p = ::getenv("QIBM_MULTI_THREADED");
4353    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4354  }
4355
4356  p = ::getenv("LDR_CNTRL");
4357  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4358  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4359    if (p && ::strstr(p, "TEXTPSIZE")) {
4360      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4361        "you may experience hangs or crashes on OS/400 V7R1.");
4362    }
4363  }
4364
4365  p = ::getenv("AIXTHREAD_GUARDPAGES");
4366  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4367
4368} // end: os::Aix::scan_environment()
4369
4370// PASE: initialize the libo4 library (PASE porting library).
4371void os::Aix::initialize_libo4() {
4372  guarantee(os::Aix::on_pase(), "OS/400 only.");
4373  if (!libo4::init()) {
4374    trcVerbose("libo4 initialization failed.");
4375    assert(false, "libo4 initialization failed");
4376  } else {
4377    trcVerbose("libo4 initialized.");
4378  }
4379}
4380
4381// AIX: initialize the libperfstat library.
4382void os::Aix::initialize_libperfstat() {
4383  assert(os::Aix::on_aix(), "AIX only");
4384  if (!libperfstat::init()) {
4385    trcVerbose("libperfstat initialization failed.");
4386    assert(false, "libperfstat initialization failed");
4387  } else {
4388    trcVerbose("libperfstat initialized.");
4389  }
4390}
4391
4392/////////////////////////////////////////////////////////////////////////////
4393// thread stack
4394
// Function to query the current stack size using pthread_getthrds_np.
//
// Returns true on success. On success, *p_stack_base (if non-NULL) receives
// the logical stack base (== __pi_stackend, the highest usable address) and
// *p_stack_size (if non-NULL) the usable size below it, excluding the AIX
// guard-page area at the low end. Either out-parameter may be NULL.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, I assert here.
  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");

  // Information about this api can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks. For cases
  // where I create an own stack and pass this to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).

  pthread_t tid = pthread_self();
  struct __pthrdsinfo pinfo;
  char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
  int dummy_size = sizeof(dummy);

  memset(&pinfo, 0, sizeof(pinfo));

  // PTHRDSINFO_QUERY_ALL: fill in the whole __pthrdsinfo structure for the
  // calling thread; the register area (dummy) is not of interest here.
  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
                                     sizeof(pinfo), dummy, &dummy_size);

  if (rc != 0) {
    assert0(false);
    trcVerbose("pthread_getthrds_np failed (%d)", rc);
    return false;
  }
  guarantee0(pinfo.__pi_stackend);

  // The following may happen when invoking pthread_getthrds_np on a pthread
  // running on a user provided stack (when handing down a stack to pthread
  // create, see pthread_attr_setstackaddr).
  // Not sure what to do then.

  guarantee0(pinfo.__pi_stacksize);

  // Note: we get three values from pthread_getthrds_np:
  //       __pi_stackaddr, __pi_stacksize, __pi_stackend
  //
  // high addr    ---------------------
  //
  //    |         pthread internal data, like ~2K
  //    |
  //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |
  //    |          ---------------------   (__pi_stackend - __pi_stacksize)
  //    |
  //    |          padding to align the following AIX guard pages, if enabled.
  //    |
  //    V          ---------------------   __pi_stackaddr
  //
  // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
  //

  // Treat __pi_stackend as the logical base; round the low address up to a
  // page boundary so guard-page padding is excluded from the reported size.
  address stack_base = (address)(pinfo.__pi_stackend);
  address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
    os::vm_page_size());
  size_t stack_size = stack_base - stack_low_addr;

  if (p_stack_base) {
    *p_stack_base = stack_base;
  }

  if (p_stack_size) {
    *p_stack_size = stack_size;
  }

  return true;
}
4472
4473// Get the current stack base from the OS (actually, the pthread library).
4474address os::current_stack_base() {
4475  address p;
4476  query_stack_dimensions(&p, 0);
4477  return p;
4478}
4479
4480// Get the current stack size from the OS (actually, the pthread library).
4481size_t os::current_stack_size() {
4482  size_t s;
4483  query_stack_dimensions(0, &s);
4484  return s;
4485}
4486
4487// Refer to the comments in os_solaris.cpp park-unpark.
4488
4489// utility to compute the abstime argument to timedwait:
4490// millis is the relative timeout time
4491// abstime will be the absolute timeout time
4492// TODO: replace compute_abstime() with unpackTime()
4493
4494static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4495  if (millis < 0) millis = 0;
4496  struct timeval now;
4497  int status = gettimeofday(&now, NULL);
4498  assert(status == 0, "gettimeofday");
4499  jlong seconds = millis / 1000;
4500  millis %= 1000;
4501  if (seconds > 50000000) { // see man cond_timedwait(3T)
4502    seconds = 50000000;
4503  }
4504  abstime->tv_sec = now.tv_sec  + seconds;
4505  long       usec = now.tv_usec + millis * 1000;
4506  if (usec >= 1000000) {
4507    abstime->tv_sec += 1;
4508    usec -= 1000000;
4509  }
4510  abstime->tv_nsec = usec * 1000;
4511  return abstime;
4512}
4513
4514// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4515// Conceptually TryPark() should be equivalent to park(0).
4516
4517int os::PlatformEvent::TryPark() {
4518  for (;;) {
4519    const int v = _Event;
4520    guarantee ((v == 0) || (v == 1), "invariant");
4521    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4522  }
4523}
4524
4525void os::PlatformEvent::park() {       // AKA "down()"
4526  // Invariant: Only the thread associated with the Event/PlatformEvent
4527  // may call park().
4528  // TODO: assert that _Assoc != NULL or _Assoc == Self
4529  int v;
4530  for (;;) {
4531    v = _Event;
4532    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4533  }
4534  guarantee (v >= 0, "invariant");
4535  if (v == 0) {
4536    // Do this the hard way by blocking ...
4537    int status = pthread_mutex_lock(_mutex);
4538    assert_status(status == 0, status, "mutex_lock");
4539    guarantee (_nParked == 0, "invariant");
4540    ++ _nParked;
4541    while (_Event < 0) {
4542      status = pthread_cond_wait(_cond, _mutex);
4543      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
4544    }
4545    -- _nParked;
4546
4547    // In theory we could move the ST of 0 into _Event past the unlock(),
4548    // but then we'd need a MEMBAR after the ST.
4549    _Event = 0;
4550    status = pthread_mutex_unlock(_mutex);
4551    assert_status(status == 0, status, "mutex_unlock");
4552  }
4553  guarantee (_Event >= 0, "invariant");
4554}
4555
// Timed variant of park(): returns OS_OK if the event was set (or a permit
// was already pending), OS_TIMEOUT if the relative timeout 'millis' elapsed.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant");

  int v;
  // Atomically decrement _Event; v holds the pre-decrement value.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  // A permit was pending - consumed it, no need to block.
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
  // We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
                  status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;         // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  // A non-negative _Event here means unpark() ran before the deadline.
  if (_Event >= 0) {
     ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant");
  return ret;
}
4609
// Set the event and wake the associated thread if it is blocked in park().
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;
  // Lock-free fast path: raise _Event to 1. If it is already 1 the event is
  // set and there is nothing to do.
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  // v < 0 means a thread is (or is about to be) blocked in park().
  if (v < 0) {
    // Wait for the thread associated with the event to vacate
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // NOTE(review): unlike the Linux/Solaris ports, this code signals
      // *while still holding the mutex* (see the comment below); the old
      // "signal after dropping the lock" wording here was stale.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    // Mutex should be locked for pthread_cond_signal(_cond).
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // Note that we signal() _after dropping the lock for "immortal" Events.
  // This is safe and avoids a common class of futile wakeups. In rare
  // circumstances this can cause a thread to return prematurely from
  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  // simply re-test the condition and re-park itself.
  // (NOTE(review): this port signals *before* unlocking - see above - so the
  // paragraph describes the alternative, also-safe ordering of other ports.)
}
4648
4649
4650// JSR166
4651// -------------------------------------------------------
4652
4653//
4654// The solaris and linux implementations of park/unpark are fairly
4655// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
4657// Park decrements count if > 0, else does a condvar wait. Unpark
4658// sets count to 1 and signals condvar. Only one thread ever waits
4659// on the condvar. Contention seen when trying to park implies that someone
4660// is unparking you, so don't wait. And spurious returns are fine, so there
4661// is no need to track notifications.
4662//
4663
4664#define MAX_SECS 100000000
4665//
4666// This code is common to linux and solaris and will be moved to a
4667// common place in dolphin.
4668//
4669// The passed in time value is either a relative time in nanoseconds
4670// or an absolute time in milliseconds. Either way it has to be unpacked
4671// into suitable seconds and nanoseconds components and stored in the
4672// given timespec structure.
4673// Given time is a 64-bit value and the time_t used in the timespec is only
4674// a signed-32-bit value (except on 64-bit Linux) we have to watch for
4675// overflow if times way in the future are given. Further on Solaris versions
4676// prior to 10 there is a restriction (see cond_timedwait) that the specified
4677// number of seconds, in abstime, is less than current_time + 100,000,000.
4678// As it will be 28 years before "now + 100000000" will overflow we can
4679// ignore overflow and just impose a hard-limit on seconds using the value
4680// of "now + 100,000,000". This places a limit on the timeout of about 3.17
4681// years from "now".
4682//
4683
// Unpack 'time' - an absolute deadline in milliseconds when isAbsolute, or a
// relative timeout in nanoseconds otherwise - into *absTime, clamping the
// seconds component to now + MAX_SECS (see the block comment above).
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // Upper bound for tv_sec, to avoid time_t overflow (see comment above).
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // Absolute deadline: split milliseconds into seconds + nanoseconds.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // Relative timeout: add the nanosecond delta to 'now'.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      // Carry nanosecond overflow into the seconds field.
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}
4723
// JSR166 LockSupport.park(): block the current thread until a permit is
// available (unpark), the (absolute ms / relative ns) timeout expires, the
// thread is interrupted, or a spurious wakeup occurs.
void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0;
    OrderAccess::fence();
    return;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
    // Paired with the fence in unpark(); ensures the permit consumption is
    // visible before returning.
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  // time == 0 here means "wait without timeout" (the no-wait cases returned
  // earlier).
  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex);
  } else {
    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  // Restore the signal mask blocked above.
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}
4810
4811void Parker::unpark() {
4812  int s, status;
4813  status = pthread_mutex_lock(_mutex);
4814  assert (status == 0, "invariant");
4815  s = _counter;
4816  _counter = 1;
4817  if (s < 1) {
4818    status = pthread_mutex_unlock(_mutex);
4819    assert (status == 0, "invariant");
4820    status = pthread_cond_signal (_cond);
4821    assert (status == 0, "invariant");
4822  } else {
4823    pthread_mutex_unlock(_mutex);
4824    assert (status == 0, "invariant");
4825  }
4826}
4827
4828extern char** environ;
4829
4830// Run the specified command in a separate process. Return its exit value,
4831// or -1 on failure (e.g. can't fork a new process).
4832// Unlike system(), this function can be called from signal handler. It
4833// doesn't block SIGINT et al.
4834int os::fork_and_exec(char* cmd) {
4835  char * argv[4] = {"sh", "-c", cmd, NULL};
4836
4837  pid_t pid = fork();
4838
4839  if (pid < 0) {
4840    // fork failed
4841    return -1;
4842
4843  } else if (pid == 0) {
4844    // child process
4845
4846    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4847    execve("/usr/bin/sh", argv, environ);
4848
4849    // execve failed
4850    _exit(-1);
4851
4852  } else {
4853    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4854    // care about the actual exit code, for now.
4855
4856    int status;
4857
4858    // Wait for the child process to exit. This returns immediately if
4859    // the child has already exited. */
4860    while (waitpid(pid, &status, 0) < 0) {
4861      switch (errno) {
4862        case ECHILD: return 0;
4863        case EINTR: break;
4864        default: return -1;
4865      }
4866    }
4867
4868    if (WIFEXITED(status)) {
4869      // The child exited normally; get its exit code.
4870      return WEXITSTATUS(status);
4871    } else if (WIFSIGNALED(status)) {
4872      // The child exited because of a signal.
4873      // The best value to return is 0x80 + signal number,
4874      // because that is what all Unix shells do, and because
4875      // it allows callers to distinguish between process exit and
4876      // process death by signal.
4877      return 0x80 + WTERMSIG(status);
4878    } else {
4879      // Unknown exit code; pass it through.
4880      return status;
4881    }
4882  }
4883  return -1;
4884}
4885
4886// is_headless_jre()
4887//
4888// Test for the existence of xawt/libmawt.so or libawt_xawt.so
4889// in order to report if we are running in a headless jre.
4890//
4891// Since JDK8 xawt/libmawt.so is moved into the same directory
4892// as libawt.so, and renamed libawt_xawt.so
4893bool os::is_headless_jre() {
4894  struct stat statbuf;
4895  char buf[MAXPATHLEN];
4896  char libmawtpath[MAXPATHLEN];
4897  const char *xawtstr = "/xawt/libmawt.so";
4898  const char *new_xawtstr = "/libawt_xawt.so";
4899
4900  char *p;
4901
4902  // Get path to libjvm.so
4903  os::jvm_path(buf, sizeof(buf));
4904
4905  // Get rid of libjvm.so
4906  p = strrchr(buf, '/');
4907  if (p == NULL) return false;
4908  else *p = '\0';
4909
4910  // Get rid of client or server
4911  p = strrchr(buf, '/');
4912  if (p == NULL) return false;
4913  else *p = '\0';
4914
4915  // check xawt/libmawt.so
4916  strcpy(libmawtpath, buf);
4917  strcat(libmawtpath, xawtstr);
4918  if (::stat(libmawtpath, &statbuf) == 0) return false;
4919
4920  // check libawt_xawt.so
4921  strcpy(libmawtpath, buf);
4922  strcat(libmawtpath, new_xawtstr);
4923  if (::stat(libmawtpath, &statbuf) == 0) return false;
4924
4925  return true;
4926}
4927
4928// Get the default path to the core file
4929// Returns the length of the string
4930int os::get_core_path(char* buffer, size_t bufferSize) {
4931  const char* p = get_current_directory(buffer, bufferSize);
4932
4933  if (p == NULL) {
4934    assert(p != NULL, "failed to get current directory");
4935    return 0;
4936  }
4937
4938  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4939                                               p, current_process_id());
4940
4941  return strlen(buffer);
4942}
4943
4944#ifndef PRODUCT
// Internal VM test hook; intentionally empty on this platform.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
4948#endif
4949
4950bool os::start_debugging(char *buf, int buflen) {
4951  int len = (int)strlen(buf);
4952  char *p = &buf[len];
4953
4954  jio_snprintf(p, buflen -len,
4955                 "\n\n"
4956                 "Do you want to debug the problem?\n\n"
4957                 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4958                 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4959                 "Otherwise, press RETURN to abort...",
4960                 os::current_process_id(),
4961                 os::current_thread_id(), thread_self());
4962
4963  bool yes = os::message_box("Unexpected Error", buf);
4964
4965  if (yes) {
4966    // yes, user asked VM to launch debugger
4967    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4968
4969    os::fork_and_exec(buf);
4970    yes = false;
4971  }
4972  return yes;
4973}
4974