os_aix.cpp revision 9642:3148b7fc645f
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS documentation, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "misc_aix.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif

// PPC port
static const uintx Use64KPagesThreshold       = 1*M;
static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;

// Add a missing declaration (it should be in procinfo.h but isn't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif
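
// Illustrative sketch (not used by the VM): getthrds64 can be used to iterate
// over the kernel threads of the current process, matching the prototype
// declared above. Buffer size and error handling here are illustrative only:
//
//   struct thrdentry64 entries[10];
//   tid64_t cursor = 0;
//   int n;
//   while ((n = getthrds64(getpid(), entries, sizeof(struct thrdentry64),
//                          &cursor, 10)) > 0) {
//     // entries[0..n-1] hold thread info; 'cursor' advances the scan.
//     if (n < 10) break; // last batch
//   }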

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// In this file, codeptr_t denotes a *real code pointer*: any function taking
// codeptr_t as an argument assumes a real code address and won't handle
// function descriptors (e.g. getFuncName), whereas functions taking address
// as an argument will deal with function descriptors
// (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
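
// Illustration (a sketch, not VM code): a C function pointer on AIX refers to
// a function descriptor in a data segment, not to code. The real code address
// can be read from the descriptor with the FunctionDescriptor helper from
// porting_aix.hpp, e.g.:
//
//   // 'some_function' is a hypothetical extern "C" function.
//   void (*fp)() = &some_function;
//   codeptr_t code = (codeptr_t)((FunctionDescriptor*)(void*)fp)->entry();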

// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
#define PV_7 0x200000          /* Power PC 7 */
#define PV_7_Compat 0x208000   /* Power PC 7 */
#endif
#ifndef PV_8
#define PV_8 0x300000          /* Power PC 8 */
#define PV_8_Compat 0x308000   /* Power PC 8 */
#endif

// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// Function to check a given stack pointer against given stack limits.
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// Returns true if p is a valid code pointer.
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (!LoadedLibraries::find_for_text_address(p, NULL)) {
    return false;
  }
  return true;
}

// Macro to check a given stack pointer against given stack limits and to die if test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// Macro to check the current stack pointer against given stack limits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in the AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" and 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via the environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, the page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found at the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
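// Example (illustrative only): 64K data, text and shared memory page sizes
// could be requested at launch time via, e.g.:
//   LDR_CNTRL=DATAPSIZE=64K@TEXTPSIZE=64K@SHMPSIZE=64K <java command>
// (LDR_CNTRL suboptions are separated by '@').
//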
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
static bool is_close_to_brk(address a) {
  address a1 = (address) sbrk(0);
  if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
    return true;
  }
  return false;
}
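
// Worked example: with MaxExpectedDataSegmentSize = 8G (see above), any wish
// address in [sbrk(0), sbrk(0) + 8G) is considered too close to the BRK and
// callers will avoid attaching memory there.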

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

// Returns true if the process runs with extra privileges, i.e. the real and
// effective uid or gid differ (setuid/setgid execution).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x40000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
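
// Worked example for the chunking above (maxDisclaimSize = 1G): disclaiming
// 2.5G yields numFullDisclaimsNeeded = 2 and lastDisclaimSize = 512M, i.e.
// two full 1G disclaims followed by one trailing 512M disclaim.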

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}
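
// Usage sketch (mirrors the probing code in query_multipage_support below):
//
//   int dummy = 0;
//   size_t stack_psize = os::Aix::query_pagesize(&dummy); // pthread stack page size
//   void* p = ::malloc(SIZE_16M);
//   size_t data_psize = os::Aix::query_pagesize(p);       // data segment page size
//   ::free(p);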

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // Get the number of online (logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    case -1:       return "not set";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is defined either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  {
    void* p = ::malloc(SIZE_16M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as the primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons, so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  /* PPC port: so far unused.
  {
    address any_function =
      (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }
  */

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    Unimplemented();
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
      trc("disabling multipage support.\n");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i ++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i ++) {
      const size_t pagesize = sizes[i];
      if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set pagesize.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
        // PPC port  MiscUtils::describe_errno(en));
      } else {
        // Attach and double-check the page size.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == SIZE_64K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == SIZE_16M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s\n",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s\n",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s\n",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_64K_pages ? "yes" : "no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_16M_pages ? "yes" : "no"));
  trcVerbose("Multipage error details: %d\n",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == SIZE_4K);
  assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
  // PPC port: so far unused. assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);

} // end query_multipage_support()

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX, we honor the user setting of LIBPATH.
  // Eventually, all library path setting will be done here.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow the debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
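
// Usage sketch: all meminfo_t fields are byte counts (4K pages * 4096, see
// above), e.g.:
//
//   os::Aix::meminfo_t mi;
//   if (os::Aix::get_meminfo(&mi)) {
//     const julong phys_free = mi.real_free; // free real memory, in bytes
//   }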

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset (&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy (pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_8:
    strcpy(pci->version, "Power PC 8");
    break;
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  case PV_8_Compat:
    strcpy(pci->version, "PV_8_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially noticeable on
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (stack_size == 0) {
    stack_size = os::Aix::default_stack_size(thr_type);

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
      stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
  pthread_attr_setstacksize(&attr, stack_size);

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret == 0) {
    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
  } else {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "pthread_continue_np failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}


// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (they probably only ever tested in C, not C++)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX, use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time base register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We'd better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the VM 'process'.

  // Under POSIX, getpid() returns the same pid for all threads of
  // a process, not a unique pid per thread.

  // If you are looking for a per-thread id, use the kernel thread id
  // returned by OSThread::thread_id() (see os::Aix::gettid() above).


  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.
  loaded_module_t lm;
  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else {
    return false;
  }

}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
    // It is a real code pointer.
    return p;
  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
    // Pointer to data segment, potential function descriptor.
    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
      // It is a function descriptor.
      return code_entry;
    }
  }

  return NULL;
}
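
// Usage sketch: both a real code pointer and a function pointer literal can be
// passed; only invalid input yields NULL. For example (illustrative only):
//
//   address a = CAST_FROM_FN_PTR(address, os::breakpoint); // descriptor address
//   address code = resolve_function_descriptor_to_code_pointer(a);
//   // 'code' now points into a text segment, or is NULL.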

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset, demangle);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module short name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  if (p_name && namelen > 0) {
    loaded_module_t lm;
    if (LoadedLibraries::find_for_text_address(pc, &lm) != NULL) {
      strncpy(p_name, lm.shortname, namelen);
      p_name[namelen - 1] = '\0';
    }
    return 0;
  }

  return -1;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The library is loaded immediately, with all its dependents.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

void* os::dll_lookup(void* handle, const char* name) {
  void* res = dlsym(handle, name);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::get_summary_os_info(char* buf, size_t buflen) {
  // There might be something more readable than uname results for AIX.
  struct utsname name;
  uname(&name);
  snprintf(buf, buflen, "%s %s", name.release, name.version);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print(name.sysname); st->print(" ");
  st->print(name.nodename); st->print(" ");
  st->print(name.release); st->print(" ");
  st->print(name.version); st->print(" ");
  st->print(name.machine);
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}

void os::print_memory_info(outputStream* st) {

  st->print_cr("Memory:");

  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  Default shared memory page size:        %s",
    describe_pagesize(g_multipage_support.shmpsize));
1593    st->print_cr("  Can use 64K pages dynamically with shared memory: %s",
1594    (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1595  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1596    (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1597  if (g_multipage_error != 0) {
1598    st->print_cr("  multipage error: %d", g_multipage_error);
1599  }
1600
1601  // print out LDR_CNTRL because it affects the default page sizes
1602  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1603  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1604
1605  const char* const extshm = ::getenv("EXTSHM");
1606  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1607  if (extshm != NULL && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1608    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1609  }
1610
1611  // Call os::Aix::get_meminfo() to retrieve memory statistics.
1612  os::Aix::meminfo_t mi;
1613  if (os::Aix::get_meminfo(&mi)) {
1614    char buffer[256];
1615    if (os::Aix::on_aix()) {
1616      jio_snprintf(buffer, sizeof(buffer),
1617                   "  physical total : %llu\n"
1618                   "  physical free  : %llu\n"
1619                   "  swap total     : %llu\n"
1620                   "  swap free      : %llu\n",
1621                   mi.real_total,
1622                   mi.real_free,
1623                   mi.pgsp_total,
1624                   mi.pgsp_free);
1625    } else {
1626      Unimplemented();
1627    }
1628    st->print_raw(buffer);
1629  } else {
1630    st->print_cr("  (no more information available)");
1631  }
1632}
1633
1634// Get a string for the cpuinfo that is a summary of the cpu type
1635void os::get_summary_cpu_info(char* buf, size_t buflen) {
1636  // Report the CPU version string if we can get it; otherwise fall back to a generic name.
1637  os::Aix::cpuinfo_t ci;
1638  if (os::Aix::get_cpuinfo(&ci)) {
1639    strncpy(buf, ci.version, buflen);
1640  } else {
1641    strncpy(buf, "AIX", buflen);
1642  }
  // strncpy() does not null-terminate if the source fills the buffer.
  if (buflen > 0) {
    buf[buflen - 1] = '\0';
  }
1643}
1644
1645void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1646}
1647
1648void os::print_siginfo(outputStream* st, void* siginfo) {
1649  // Use common posix version.
1650  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1651  st->cr();
1652}
1653
1654static void print_signal_handler(outputStream* st, int sig,
1655                                 char* buf, size_t buflen);
1656
1657void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1658  st->print_cr("Signal Handlers:");
1659  print_signal_handler(st, SIGSEGV, buf, buflen);
1660  print_signal_handler(st, SIGBUS , buf, buflen);
1661  print_signal_handler(st, SIGFPE , buf, buflen);
1662  print_signal_handler(st, SIGPIPE, buf, buflen);
1663  print_signal_handler(st, SIGXFSZ, buf, buflen);
1664  print_signal_handler(st, SIGILL , buf, buflen);
1665  print_signal_handler(st, SR_signum, buf, buflen);
1666  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1667  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1668  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1669  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1670  print_signal_handler(st, SIGTRAP, buf, buflen);
1671  print_signal_handler(st, SIGDANGER, buf, buflen);
1672}
1673
1674static char saved_jvm_path[MAXPATHLEN] = {0};
1675
1676// Find the full path to the current module, libjvm.so.
1677void os::jvm_path(char *buf, jint buflen) {
1678  // Error checking.
1679  if (buflen < MAXPATHLEN) {
1680    assert(false, "must use a large-enough buffer");
1681    buf[0] = '\0';
1682    return;
1683  }
1684  // Lazy resolve the path to current module.
1685  if (saved_jvm_path[0] != 0) {
1686    strcpy(buf, saved_jvm_path);
1687    return;
1688  }
1689
1690  Dl_info dlinfo;
1691  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1692  assert(ret != 0, "cannot locate libjvm");
1693  char* rp = realpath((char *)dlinfo.dli_fname, buf);
1694  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1695
1696  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1697  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1698}
1699
1700void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1701  // no prefix required, not even "_"
1702}
1703
1704void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1705  // no suffix required
1706}
1707
1708////////////////////////////////////////////////////////////////////////////////
1709// sun.misc.Signal support
1710
1711static volatile jint sigint_count = 0;
1712
1713static void
1714UserHandler(int sig, void *siginfo, void *context) {
1715  // 4511530 - sem_post is serialized and handled by the manager thread. When
1716  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1717  // don't want to flood the manager thread with sem_post requests.
1718  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1719    return;
1720
1721  // Ctrl-C is pressed during error reporting, likely because the error
1722  // handler fails to abort. Let VM die immediately.
1723  if (sig == SIGINT && is_error_reported()) {
1724    os::die();
1725  }
1726
1727  os::signal_notify(sig);
1728}
1729
1730void* os::user_handler() {
1731  return CAST_FROM_FN_PTR(void*, UserHandler);
1732}
1733
1734extern "C" {
1735  typedef void (*sa_handler_t)(int);
1736  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1737}
1738
1739void* os::signal(int signal_number, void* handler) {
1740  struct sigaction sigAct, oldSigAct;
1741
1742  sigfillset(&(sigAct.sa_mask));
1743
1744  // Do not block out synchronous signals in the signal handler.
1745  // Blocking synchronous signals only makes sense if you can really
1746  // be sure that those signals won't happen during signal handling,
1747  // when the blocking applies. Normal signal handlers are lean and
1748  // do not cause signals. But our signal handlers tend to be "risky"
1749  // - secondary SIGSEGV, SIGILL or SIGBUS may and do happen.
1750  // On AIX (PASE) there was a case where a SIGSEGV happened, followed
1751  // by a SIGILL, which was blocked due to the signal mask. The process
1752  // just hung forever. Better to crash from a secondary signal than to hang.
1753  sigdelset(&(sigAct.sa_mask), SIGSEGV);
1754  sigdelset(&(sigAct.sa_mask), SIGBUS);
1755  sigdelset(&(sigAct.sa_mask), SIGILL);
1756  sigdelset(&(sigAct.sa_mask), SIGFPE);
1757  sigdelset(&(sigAct.sa_mask), SIGTRAP);
1758
1759  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1760
1761  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1762
1763  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1764    // -1 means registration failed
1765    return (void *)-1;
1766  }
1767
1768  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1769}
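
// A sketch of how a handler might be registered via os::signal() (illustrative
// only; the handler and signal are placeholders, not part of this file):
//
//   static void my_handler(int sig) { /* async-signal-safe work only */ }
//   void* old = os::signal(SIGUSR1, CAST_FROM_FN_PTR(void*, my_handler));
//   if (old == (void*)-1) { /* sigaction() failed, handler not installed */ }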
1770
1771void os::signal_raise(int signal_number) {
1772  ::raise(signal_number);
1773}
1774
1775//
1776// The following code is moved from os.cpp for making this
1777// code platform specific, which it is by its very nature.
1778//
1779
1780// Will be modified when max signal is changed to be dynamic
1781int os::sigexitnum_pd() {
1782  return NSIG;
1783}
1784
1785// a counter for each possible signal value
1786static volatile jint pending_signals[NSIG+1] = { 0 };
1787
1788// POSIX unnamed semaphore used for signal handshaking.
1789static sem_t sig_sem;
1790
1791void os::signal_init_pd() {
1792  // Initialize signal structures
1793  ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1794
1795  // Initialize signal semaphore
1796  int rc = ::sem_init(&sig_sem, 0, 0);
1797  guarantee(rc != -1, "sem_init failed");
1798}
1799
1800void os::signal_notify(int sig) {
1801  Atomic::inc(&pending_signals[sig]);
1802  ::sem_post(&sig_sem);
1803}
1804
1805static int check_pending_signals(bool wait) {
1806  Atomic::store(0, &sigint_count);
1807  for (;;) {
1808    for (int i = 0; i < NSIG + 1; i++) {
1809      jint n = pending_signals[i];
1810      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1811        return i;
1812      }
1813    }
1814    if (!wait) {
1815      return -1;
1816    }
1817    JavaThread *thread = JavaThread::current();
1818    ThreadBlockInVM tbivm(thread);
1819
1820    bool threadIsSuspended;
1821    do {
1822      thread->set_suspend_equivalent();
1823      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1824
1825      ::sem_wait(&sig_sem);
1826
1827      // were we externally suspended while we were waiting?
1828      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1829      if (threadIsSuspended) {
1830        //
1831        // The semaphore has been incremented, but while we were waiting
1832        // another thread suspended us. We don't want to continue running
1833        // while suspended because that would surprise the thread that
1834        // suspended us.
1835        //
1836        ::sem_post(&sig_sem);
1837
1838        thread->java_suspend_self();
1839      }
1840    } while (threadIsSuspended);
1841  }
1842}
1843
1844int os::signal_lookup() {
1845  return check_pending_signals(false);
1846}
1847
1848int os::signal_wait() {
1849  return check_pending_signals(true);
1850}
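
// How the two sides above are meant to pair up (a summary, not extra behavior):
// os::signal_notify(sig) increments pending_signals[sig] and posts sig_sem;
// the signal dispatcher thread loops on os::signal_wait(), which claims one
// pending occurrence via Atomic::cmpxchg and returns the signal number,
// blocking on sem_wait(&sig_sem) whenever nothing is pending.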
1851
1852////////////////////////////////////////////////////////////////////////////////
1853// Virtual Memory
1854
1855// We need to keep small simple bookkeeping for os::reserve_memory and friends.
1856
1857#define VMEM_MAPPED  1
1858#define VMEM_SHMATED 2
1859
1860struct vmembk_t {
1861  int type;         // 1 - mmap, 2 - shmat
1862  char* addr;
1863  size_t size;      // Real size, may be larger than usersize.
1864  size_t pagesize;  // page size of area
1865  vmembk_t* next;
1866
1867  bool contains_addr(char* p) const {
1868    return p >= addr && p < (addr + size);
1869  }
1870
1871  bool contains_range(char* p, size_t s) const {
1872    return contains_addr(p) && contains_addr(p + s - 1);
1873  }
1874
1875  void print_on(outputStream* os) const {
1876    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1877      " bytes, %d %s pages), %s",
1878      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1879      (type == VMEM_SHMATED ? "shmat" : "mmap")
1880    );
1881  }
1882
1883  // Check that range is a sub range of memory block (or equal to memory block);
1884  // also check that range is fully page aligned to the page size of the block.
1885  void assert_is_valid_subrange(char* p, size_t s) const {
1886    if (!contains_range(p, s)) {
1887      fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1888              "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1889              p, p + s - 1, addr, addr + size - 1);
1890      guarantee0(false);
1891    }
1892    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1893      fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1894              " aligned to pagesize (%s)\n", p, p + s);
1895      guarantee0(false);
1896    }
1897  }
1898};
1899
1900static struct {
1901  vmembk_t* first;
1902  MiscUtils::CritSect cs;
1903} vmem;
1904
1905static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1906  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1907  assert0(p);
1908  if (p) {
1909    MiscUtils::AutoCritSect lck(&vmem.cs);
1910    p->addr = addr; p->size = size;
1911    p->pagesize = pagesize;
1912    p->type = type;
1913    p->next = vmem.first;
1914    vmem.first = p;
1915  }
1916}
1917
1918static vmembk_t* vmembk_find(char* addr) {
1919  MiscUtils::AutoCritSect lck(&vmem.cs);
1920  for (vmembk_t* p = vmem.first; p; p = p->next) {
1921    if (p->addr <= addr && (p->addr + p->size) > addr) {
1922      return p;
1923    }
1924  }
1925  return NULL;
1926}
1927
1928static void vmembk_remove(vmembk_t* p0) {
1929  MiscUtils::AutoCritSect lck(&vmem.cs);
1930  assert0(p0);
1931  assert0(vmem.first); // List should not be empty.
1932  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1933    if (*pp == p0) {
1934      *pp = p0->next;
1935      ::free(p0);
1936      return;
1937    }
1938  }
1939  assert0(false); // Not found?
1940}
1941
1942static void vmembk_print_on(outputStream* os) {
1943  MiscUtils::AutoCritSect lck(&vmem.cs);
1944  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1945    vmi->print_on(os);
1946    os->cr();
1947  }
1948}
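
// Typical lifecycle of a bookkeeping entry (illustrative only; addr/size are
// placeholders):
//
//   vmembk_add(addr, size, SIZE_64K, VMEM_SHMATED);  // after a successful attach
//   ...
//   vmembk_t* vmi = vmembk_find(addr);               // on uncommit/release
//   if (vmi != NULL) {
//     vmi->assert_is_valid_subrange(addr, size);
//     vmembk_remove(vmi);                            // once the range is fully released
//   }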
1949
1950// Reserve and attach a section of System V memory.
1951// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1952// address. Failing that, it will attach the memory anywhere.
1953// If <requested_addr> is NULL, function will attach the memory anywhere.
1954//
1955// <alignment_hint> is being ignored by this function. It is very probable however that the
1956// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1957  // Should this not be enough, we can put more work into it.
1958static char* reserve_shmated_memory (
1959  size_t bytes,
1960  char* requested_addr,
1961  size_t alignment_hint) {
1962
1963  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1964    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1965    bytes, requested_addr, alignment_hint);
1966
1967  // Either give me wish address or wish alignment but not both.
1968  assert0(!(requested_addr != NULL && alignment_hint != 0));
1969
1970  // We must prevent anyone from attaching too close to the
1971  // BRK because that may cause malloc OOM.
1972  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1973    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1974      "Will attach anywhere.", requested_addr);
1975    // Act like the OS refused to attach there.
1976    requested_addr = NULL;
1977  }
1978
1979  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1980  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1981  if (os::Aix::on_pase_V5R4_or_older()) {
1982    ShouldNotReachHere();
1983  }
1984
1985  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1986  const size_t size = align_size_up(bytes, SIZE_64K);
1987
1988  // Reserve the shared segment.
1989  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1990  if (shmid == -1) {
1991    trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1992    return NULL;
1993  }
1994
1995  // Important note:
1996  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1997  // We must remove it from the system right after attaching it. System V shm segments are global and
1998  // would otherwise outlive the process.
1999  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2000
2001  struct shmid_ds shmbuf;
2002  memset(&shmbuf, 0, sizeof(shmbuf));
2003  shmbuf.shm_pagesize = SIZE_64K;
2004  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2005    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2006               size / SIZE_64K, errno);
2007    // I want to know if this ever happens.
2008    assert(false, "failed to set page size for shmat");
2009  }
2010
2011  // Now attach the shared segment.
2012  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2013  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2014  // were not a segment boundary.
2015  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2016  const int errno_shmat = errno;
2017
2018  // (A) Right after shmat and before handing shmat errors delete the shm segment.
2019  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2020    trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2021    assert(false, "failed to remove shared memory segment!");
2022  }
2023
2024  // Handle shmat error. If we failed to attach, just return.
2025  if (addr == (char*)-1) {
2026    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2027    return NULL;
2028  }
2029
2030  // Just for info: query the real page size. In case setting the page size did not
2031  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2032  const size_t real_pagesize = os::Aix::query_pagesize(addr);
2033  if (real_pagesize != shmbuf.shm_pagesize) {
2034    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2035  }
2036
2037  if (addr) {
2038    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2039      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2040  } else {
2041    if (requested_addr != NULL) {
2042      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2043    } else {
2044      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2045    }
2046  }
2047
2048  // book-keeping
2049  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2050  assert0(is_aligned_to(addr, os::vm_page_size()));
2051
2052  return addr;
2053}
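
// Summary of the sequence above (no additional behavior implied): shmget()
// creates the segment, shmctl(SHM_PAGESIZE) switches it to 64K pages, shmat()
// attaches it, and shmctl(IPC_RMID) immediately marks it for deletion. The
// early IPC_RMID is what ties the segment lifetime to this process - the
// kernel removes it for real once the last attachment is gone (shmdt).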
2054
2055static bool release_shmated_memory(char* addr, size_t size) {
2056
2057  trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2058    addr, addr + size - 1);
2059
2060  bool rc = false;
2061
2062  // TODO: is there a way to verify shm size without doing bookkeeping?
2063  if (::shmdt(addr) != 0) {
2064    trcVerbose("error (%d).", errno);
2065  } else {
2066    trcVerbose("ok.");
2067    rc = true;
2068  }
2069  return rc;
2070}
2071
2072static bool uncommit_shmated_memory(char* addr, size_t size) {
2073  trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2074    addr, addr + size - 1);
2075
2076  const bool rc = my_disclaim64(addr, size);
2077
2078  if (!rc) {
2079    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2080    return false;
2081  }
2082  return true;
2083}
2084
2085// Reserve memory via mmap.
2086// If <requested_addr> is given, an attempt is made to attach at the given address.
2087// Failing that, memory is allocated at any address.
2088// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2089// allocate at an address aligned with the given alignment. Failing that, memory
2090// is aligned anywhere.
2091static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2092  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2093    "alignment_hint " UINTX_FORMAT "...",
2094    bytes, requested_addr, alignment_hint);
2095
2096  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2097  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2098    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2099    return NULL;
2100  }
2101
2102  // We must prevent anyone from attaching too close to the
2103  // BRK because that may cause malloc OOM.
2104  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2105    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2106      "Will attach anywhere.", requested_addr);
2107    // Act like the OS refused to attach there.
2108    requested_addr = NULL;
2109  }
2110
2111  // Specify one or the other but not both.
2112  assert0(!(requested_addr != NULL && alignment_hint > 0));
2113
2114  // In 64K mode, we claim the global page size (os::vm_page_size())
2115  // is 64K. This is one of the few points where that illusion may
2116  // break, because mmap() will always return memory aligned to 4K. So
2117  // we must ensure we only ever return memory aligned to 64k.
2118  if (alignment_hint) {
2119    alignment_hint = lcm(alignment_hint, os::vm_page_size());
2120  } else {
2121    alignment_hint = os::vm_page_size();
2122  }
2123
2124  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2125  const size_t size = align_size_up(bytes, os::vm_page_size());
2126
2127  // alignment: Allocate memory large enough to include an aligned range of the right size and
2128  // cut off the leading and trailing waste pages.
2129  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2130  const size_t extra_size = size + alignment_hint;
2131
2132  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2133  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2134  int flags = MAP_ANONYMOUS | MAP_SHARED;
2135
2136  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2137  // it means if wishaddress is given but MAP_FIXED is not set.
2138  //
2139  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2140  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2141  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2142  // get clobbered.
2143  if (requested_addr != NULL) {
2144    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2145      flags |= MAP_FIXED;
2146    }
2147  }
2148
2149  char* addr = (char*)::mmap(requested_addr, extra_size,
2150      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2151
2152  if (addr == MAP_FAILED) {
2153    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2154    return NULL;
2155  }
2156
2157  // Handle alignment.
2158  char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2159  const size_t waste_pre = addr_aligned - addr;
2160  char* const addr_aligned_end = addr_aligned + size;
2161  const size_t waste_post = extra_size - waste_pre - size;
2162  if (waste_pre > 0) {
2163    ::munmap(addr, waste_pre);
2164  }
2165  if (waste_post > 0) {
2166    ::munmap(addr_aligned_end, waste_post);
2167  }
2168  addr = addr_aligned;
2169
2170  if (addr) {
2171    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2172      addr, addr + bytes, bytes);
2173  } else {
2174    if (requested_addr != NULL) {
2175      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2176    } else {
2177      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2178    }
2179  }
2180
2181  // bookkeeping
2182  vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2183
2184  // Test alignment, see above.
2185  assert0(is_aligned_to(addr, os::vm_page_size()));
2186
2187  return addr;
2188}
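
// Worked example for the over-allocate-and-trim logic above (made-up numbers):
// for size = 1M and alignment_hint = 64K, extra_size = 1M + 64K is mmapped.
// If mmap returns an address 4K past a 64K boundary, addr_aligned is rounded
// up by 60K, so waste_pre = 60K is unmapped at the front and
// waste_post = extra_size - waste_pre - size = 4K is unmapped at the end.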
2189
2190static bool release_mmaped_memory(char* addr, size_t size) {
2191  assert0(is_aligned_to(addr, os::vm_page_size()));
2192  assert0(is_aligned_to(size, os::vm_page_size()));
2193
2194  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2195    addr, addr + size - 1);
2196  bool rc = false;
2197
2198  if (::munmap(addr, size) != 0) {
2199    trcVerbose("failed (%d)\n", errno);
2200    rc = false;
2201  } else {
2202    trcVerbose("ok.");
2203    rc = true;
2204  }
2205
2206  return rc;
2207}
2208
2209static bool uncommit_mmaped_memory(char* addr, size_t size) {
2210
2211  assert0(is_aligned_to(addr, os::vm_page_size()));
2212  assert0(is_aligned_to(size, os::vm_page_size()));
2213
2214  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2215    addr, addr + size - 1);
2216  bool rc = false;
2217
2218  // Uncommit mmap memory with msync MS_INVALIDATE.
2219  if (::msync(addr, size, MS_INVALIDATE) != 0) {
2220    trcVerbose("failed (%d)\n", errno);
2221    rc = false;
2222  } else {
2223    trcVerbose("ok.");
2224    rc = true;
2225  }
2226
2227  return rc;
2228}
2229
2230// End: shared memory bookkeeping
2231////////////////////////////////////////////////////////////////////////////////////////////////////
2232
2233int os::vm_page_size() {
2234  // Seems redundant as all get out.
2235  assert(os::Aix::page_size() != -1, "must call os::init");
2236  return os::Aix::page_size();
2237}
2238
2239// Aix allocates memory by pages.
2240int os::vm_allocation_granularity() {
2241  assert(os::Aix::page_size() != -1, "must call os::init");
2242  return os::Aix::page_size();
2243}
2244
2245#ifdef PRODUCT
2246static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2247                                    int err) {
2248  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2249          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2250          strerror(err), err);
2251}
2252#endif
2253
2254void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2255                                  const char* mesg) {
2256  assert(mesg != NULL, "mesg must be specified");
2257  if (!pd_commit_memory(addr, size, exec)) {
2258    // Add extra info in product mode for vm_exit_out_of_memory():
2259    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2260    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2261  }
2262}
2263
2264bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2265
2266  assert(is_aligned_to(addr, os::vm_page_size()),
2267    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2268    p2i(addr), os::vm_page_size());
2269  assert(is_aligned_to(size, os::vm_page_size()),
2270    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2271    size, os::vm_page_size());
2272
2273  vmembk_t* const vmi = vmembk_find(addr);
2274  assert0(vmi);
2275  vmi->assert_is_valid_subrange(addr, size);
2276
2277  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2278
2279  return true;
2280}
2281
2282bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2283  return pd_commit_memory(addr, size, exec);
2284}
2285
2286void os::pd_commit_memory_or_exit(char* addr, size_t size,
2287                                  size_t alignment_hint, bool exec,
2288                                  const char* mesg) {
2289  // Alignment_hint is ignored on this OS.
2290  pd_commit_memory_or_exit(addr, size, exec, mesg);
2291}
2292
2293bool os::pd_uncommit_memory(char* addr, size_t size) {
2294  assert(is_aligned_to(addr, os::vm_page_size()),
2295    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2296    p2i(addr), os::vm_page_size());
2297  assert(is_aligned_to(size, os::vm_page_size()),
2298    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2299    size, os::vm_page_size());
2300
2301  // Dynamically do different things for mmap/shmat.
2302  const vmembk_t* const vmi = vmembk_find(addr);
2303  assert0(vmi);
2304  vmi->assert_is_valid_subrange(addr, size);
2305
2306  if (vmi->type == VMEM_SHMATED) {
2307    return uncommit_shmated_memory(addr, size);
2308  } else {
2309    return uncommit_mmaped_memory(addr, size);
2310  }
2311}
2312
2313bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2314  // Do not call this; no need to commit stack pages on AIX.
2315  ShouldNotReachHere();
2316  return true;
2317}
2318
2319bool os::remove_stack_guard_pages(char* addr, size_t size) {
2320  // Do not call this; no need to commit stack pages on AIX.
2321  ShouldNotReachHere();
2322  return true;
2323}
2324
2325void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2326}
2327
2328void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2329}
2330
2331void os::numa_make_global(char *addr, size_t bytes) {
2332}
2333
2334void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2335}
2336
2337bool os::numa_topology_changed() {
2338  return false;
2339}
2340
2341size_t os::numa_get_groups_num() {
2342  return 1;
2343}
2344
2345int os::numa_get_group_id() {
2346  return 0;
2347}
2348
2349size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2350  if (size > 0) {
2351    ids[0] = 0;
2352    return 1;
2353  }
2354  return 0;
2355}
2356
2357bool os::get_page_info(char *start, page_info* info) {
2358  return false;
2359}
2360
2361char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2362  return end;
2363}
2364
2365// Reserves and attaches a shared memory segment.
2366// Will assert if a wish address is given and could not be obtained.
2367char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2368
2369  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2370  // thereby clobbering old mappings at that place. That is probably
2371  // not intended, never used and almost certainly an error were it
2372  // ever used this way (to try attaching at a specified address
2373  // without clobbering old mappings an alternate API exists,
2374  // os::attempt_reserve_memory_at()).
2375  // Instead of mimicking the dangerous coding of the other platforms, here I
2376  // just ignore the request address (release) or assert(debug).
2377  assert0(requested_addr == NULL);
2378
2379  // Always round to os::vm_page_size(), which may be larger than 4K.
2380  bytes = align_size_up(bytes, os::vm_page_size());
2381  const size_t alignment_hint0 =
2382    alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2383
2384  // In 4K mode always use mmap.
2385  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2386  if (os::vm_page_size() == SIZE_4K) {
2387    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2388  } else {
2389    if (bytes >= Use64KPagesThreshold) {
2390      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2391    } else {
2392      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2393    }
2394  }
2395}
2396
2397bool os::pd_release_memory(char* addr, size_t size) {
2398
2399  // Dynamically do different things for mmap/shmat.
2400  vmembk_t* const vmi = vmembk_find(addr);
2401  assert0(vmi);
2402
2403  // Always round to os::vm_page_size(), which may be larger than 4K.
2404  size = align_size_up(size, os::vm_page_size());
2405  addr = (char *)align_ptr_up(addr, os::vm_page_size());
2406
2407  bool rc = false;
2408  bool remove_bookkeeping = false;
2409  if (vmi->type == VMEM_SHMATED) {
2410    // For shmatted memory, we do:
2411    // - If user wants to release the whole range, release the memory (shmdt).
2412    // - If user only wants to release a partial range, uncommit (disclaim) that
2413    //   range. That way, at least, we do not use the memory anymore (but we still
2414    //   use page table space).
2415    vmi->assert_is_valid_subrange(addr, size);
2416    if (addr == vmi->addr && size == vmi->size) {
2417      rc = release_shmated_memory(addr, size);
2418      remove_bookkeeping = true;
2419    } else {
2420      rc = uncommit_shmated_memory(addr, size);
2421    }
2422  } else {
2423    // User may unmap partial regions but region has to be fully contained.
2424#ifdef ASSERT
2425    vmi->assert_is_valid_subrange(addr, size);
2426#endif
2427    rc = release_mmaped_memory(addr, size);
2428    remove_bookkeeping = true;
2429  }
2430
2431  // update bookkeeping
2432  if (rc && remove_bookkeeping) {
2433    vmembk_remove(vmi);
2434  }
2435
2436  return rc;
2437}
2438
2439static bool checked_mprotect(char* addr, size_t size, int prot) {
2440
2441  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2442  // not tell me if protection failed when trying to protect an un-protectable range.
2443  //
2444  // This means if the memory was allocated using shmget/shmat, protection won't work
2445  // but mprotect will still return 0:
2446  //
2447  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2448
2449  bool rc = (::mprotect(addr, size, prot) == 0);
2450
2451  if (!rc) {
2452    const char* const s_errno = strerror(errno);
2453    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2454    return false;
2455  }
2456
2457  // mprotect success check
2458  //
2459  // Mprotect said it changed the protection but can I believe it?
2460  //
2461  // To be sure I need to check the protection afterwards. Try to
2462  // read from protected memory and check whether that causes a segfault.
2463  //
2464  if (!os::Aix::xpg_sus_mode()) {
2465
2466    if (CanUseSafeFetch32()) {
2467
2468      const bool read_protected =
2469        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2470         SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2471
2472      if (prot & PROT_READ) {
2473        rc = !read_protected;
2474      } else {
2475        rc = read_protected;
2476      }
2477    }
2478  }
2479  if (!rc) {
2480    assert(false, "mprotect failed.");
2481  }
2482  return rc;
2483}
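
// Why two SafeFetch32 probes with different magic values (rationale for the
// check above): a single probe returning its fault value could also mean the
// memory was readable and just happened to contain that value. Getting both
// 0x12345678 and 0x76543210 back from the same address can only mean both
// loads faulted, i.e. the page is genuinely read-protected.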
2484
2485// Set protections specified
2486bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2487  unsigned int p = 0;
2488  switch (prot) {
2489  case MEM_PROT_NONE: p = PROT_NONE; break;
2490  case MEM_PROT_READ: p = PROT_READ; break;
2491  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2492  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2493  default:
2494    ShouldNotReachHere();
2495  }
2496  // is_committed is unused.
2497  return checked_mprotect(addr, size, p);
2498}
2499
2500bool os::guard_memory(char* addr, size_t size) {
2501  return checked_mprotect(addr, size, PROT_NONE);
2502}
2503
2504bool os::unguard_memory(char* addr, size_t size) {
2505  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2506}
2507
2508// Large page support
2509
2510static size_t _large_page_size = 0;
2511
2512// Enable large page support if OS allows that.
2513void os::large_page_init() {
2514  return; // Nothing to do. See query_multipage_support and friends.
2515}
2516
2517char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2518  // "exec" is passed in but not used. Creating the shared image for
2519  // the code cache does not involve an SHM_X executable permission to check.
2520  Unimplemented();
2521  return 0;
2522}
2523
2524bool os::release_memory_special(char* base, size_t bytes) {
2525  // Detaching the SHM segment will also delete it, see reserve_memory_special().
2526  Unimplemented();
2527  return false;
2528}
2529
2530size_t os::large_page_size() {
2531  return _large_page_size;
2532}
2533
2534bool os::can_commit_large_page_memory() {
2535  // Does not matter, we do not support huge pages.
2536  return false;
2537}
2538
2539bool os::can_execute_large_page_memory() {
2540  // Does not matter, we do not support huge pages.
2541  return false;
2542}
2543
2544// Reserve memory at an arbitrary address, only if that area is
2545// available (and not reserved for something else).
2546char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2547  char* addr = NULL;
2548
2549  // Always round to os::vm_page_size(), which may be larger than 4K.
2550  bytes = align_size_up(bytes, os::vm_page_size());
2551
2552  // In 4K mode always use mmap.
2553  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2554  if (os::vm_page_size() == SIZE_4K) {
2555    return reserve_mmaped_memory(bytes, requested_addr, 0);
2556  } else {
2557    if (bytes >= Use64KPagesThreshold) {
2558      return reserve_shmated_memory(bytes, requested_addr, 0);
2559    } else {
2560      return reserve_mmaped_memory(bytes, requested_addr, 0);
2561    }
2562  }
2563
2564  return addr;
2565}
2566
2567size_t os::read(int fd, void *buf, unsigned int nBytes) {
2568  return ::read(fd, buf, nBytes);
2569}
2570
2571size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2572  return ::pread(fd, buf, nBytes, offset);
2573}
2574
2575void os::naked_short_sleep(jlong ms) {
2576  struct timespec req;
2577
2578  assert(ms < 1000, "Un-interruptible sleep; short time use only");
2579  req.tv_sec = 0;
2580  if (ms > 0) {
2581    req.tv_nsec = (ms % 1000) * 1000000;
2582  }
2583  else {
2584    req.tv_nsec = 1;
2585  }
2586
2587  nanosleep(&req, NULL);
2588
2589  return;
2590}
2591
2592// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2593void os::infinite_sleep() {
2594  while (true) {    // sleep forever ...
2595    ::sleep(100);   // ... 100 seconds at a time
2596  }
2597}
2598
2599// Used to convert frequent JVM_Yield() to nops
2600bool os::dont_yield() {
2601  return DontYieldALot;
2602}
2603
2604void os::naked_yield() {
2605  sched_yield();
2606}
2607
2608////////////////////////////////////////////////////////////////////////////////
2609// thread priority support
2610
2611// From AIX manpage to pthread_setschedparam
2612// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2613//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2614//
2615// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2616// range from 40 to 80, where 40 is the least favored priority and 80
2617// is the most favored."
2618//
2619// (Actually, I doubt this even has an impact on AIX, as we do kernel
2620// scheduling there; however, this still leaves iSeries.)
2621//
2622// We use the same values for AIX and PASE.
2623int os::java_to_os_priority[CriticalPriority + 1] = {
2624  54,             // 0 Entry should never be used
2625
2626  55,             // 1 MinPriority
2627  55,             // 2
2628  56,             // 3
2629
2630  56,             // 4
2631  57,             // 5 NormPriority
2632  57,             // 6
2633
2634  58,             // 7
2635  58,             // 8
2636  59,             // 9 NearMaxPriority
2637
2638  60,             // 10 MaxPriority
2639
2640  60              // 11 CriticalPriority
2641};
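
// Example reading of the table above: a Java thread at NormPriority (5) runs
// with SCHED_OTHER priority 57, MinPriority (1) maps to 55 and MaxPriority (10)
// to 60 - a deliberately narrow band inside the documented 40..80 range.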
2642
2643OSReturn os::set_native_priority(Thread* thread, int newpri) {
2644  if (!UseThreadPriorities) return OS_OK;
2645  pthread_t thr = thread->osthread()->pthread_id();
2646  int policy = SCHED_OTHER;
2647  struct sched_param param;
2648  param.sched_priority = newpri;
2649  int ret = pthread_setschedparam(thr, policy, &param);
2650
2651  if (ret != 0) {
2652    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2653        (int)thr, newpri, ret, strerror(ret));
2654  }
2655  return (ret == 0) ? OS_OK : OS_ERR;
2656}
2657
2658OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2659  if (!UseThreadPriorities) {
2660    *priority_ptr = java_to_os_priority[NormPriority];
2661    return OS_OK;
2662  }
2663  pthread_t thr = thread->osthread()->pthread_id();
2664  int policy = SCHED_OTHER;
2665  struct sched_param param;
2666  int ret = pthread_getschedparam(thr, &policy, &param);
2667  *priority_ptr = param.sched_priority;
2668
2669  return (ret == 0) ? OS_OK : OS_ERR;
2670}
2671
2672// Hint to the underlying OS that a task switch would not be good.
2673// Void return because it's a hint and can fail.
2674void os::hint_no_preempt() {}
2675
2676////////////////////////////////////////////////////////////////////////////////
2677// suspend/resume support
2678
2679//  the low-level signal-based suspend/resume support is a remnant from the
2680//  old VM-suspension that used to be for java-suspension, safepoints etc,
2681//  within hotspot. Now there is a single use-case for this:
2682//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2683//      that runs in the watcher thread.
2684//  The remaining code is greatly simplified from the more general suspension
2685//  code that used to be used.
2686//
2687//  The protocol is quite simple:
2688//  - suspend:
2689//      - sends a signal to the target thread
2690//      - polls the suspend state of the osthread using a yield loop
2691//      - target thread signal handler (SR_handler) sets suspend state
2692//        and blocks in sigsuspend until continued
2693//  - resume:
2694//      - sets target osthread state to continue
2695//      - sends signal to end the sigsuspend loop in the SR_handler
2696//
2697//  Note that the SR_lock plays no role in this suspend/resume protocol.
2698//
2699
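// The state transitions driven by this protocol (a summary of the code below):
//
//   SR_RUNNING   --request_suspend()--> SR_SUSPEND_REQUEST --SR_handler--> SR_SUSPENDED
//   SR_SUSPENDED --request_wakeup()---> SR_WAKEUP_REQUEST  --SR_handler--> SR_RUNNING
//
// A request that fails or times out is rolled back via cancel_suspend().
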
2700static void resume_clear_context(OSThread *osthread) {
2701  osthread->set_ucontext(NULL);
2702  osthread->set_siginfo(NULL);
2703}
2704
2705static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2706  osthread->set_ucontext(context);
2707  osthread->set_siginfo(siginfo);
2708}
2709
2710//
2711// Handler function invoked when a thread's execution is suspended or
2712// resumed. We have to be careful that only async-safe functions are
2713// called here (Note: most pthread functions are not async safe and
2714// should be avoided.)
2715//
2716// Note: sigwait() is a more natural fit than sigsuspend() from an
2717// interface point of view, but sigwait() prevents the signal handler
2718// from being run. libpthread would get very confused by not having
2719// its signal handlers run and prevents sigwait()'s use with the
2720// mutex granting signal.
2721//
2722// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2723//
2724static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2725  // Save and restore errno to avoid confusing native code with EINTR
2726  // after sigsuspend.
2727  int old_errno = errno;
2728
2729  Thread* thread = Thread::current();
2730  OSThread* osthread = thread->osthread();
2731  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2732
2733  os::SuspendResume::State current = osthread->sr.state();
2734  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2735    suspend_save_context(osthread, siginfo, context);
2736
2737    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2738    os::SuspendResume::State state = osthread->sr.suspended();
2739    if (state == os::SuspendResume::SR_SUSPENDED) {
2740      sigset_t suspend_set;  // signals for sigsuspend()
2741
2742      // get current set of blocked signals and unblock resume signal
2743      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2744      sigdelset(&suspend_set, SR_signum);
2745
2746      // wait here until we are resumed
2747      while (1) {
2748        sigsuspend(&suspend_set);
2749
2750        os::SuspendResume::State result = osthread->sr.running();
2751        if (result == os::SuspendResume::SR_RUNNING) {
2752          break;
2753        }
2754      }
2755
2756    } else if (state == os::SuspendResume::SR_RUNNING) {
2757      // request was cancelled, continue
2758    } else {
2759      ShouldNotReachHere();
2760    }
2761
2762    resume_clear_context(osthread);
2763  } else if (current == os::SuspendResume::SR_RUNNING) {
2764    // request was cancelled, continue
2765  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2766    // ignore
2767  } else {
2768    ShouldNotReachHere();
2769  }
2770
2771  errno = old_errno;
2772}
2773
2774static int SR_initialize() {
2775  struct sigaction act;
2776  char *s;
2777  // Get signal number to use for suspend/resume
2778  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2779    int sig = ::strtol(s, 0, 10);
2780    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
2781        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
2782      SR_signum = sig;
2783    } else {
2784      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
2785              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
2786    }
2787  }
2788
2789  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2790        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2791
2792  sigemptyset(&SR_sigset);
2793  sigaddset(&SR_sigset, SR_signum);
2794
2795  // Set up signal handler for suspend/resume.
2796  act.sa_flags = SA_RESTART|SA_SIGINFO;
2797  act.sa_handler = (void (*)(int)) SR_handler;
2798
2799  // SR_signum is blocked by default.
2800  // 4528190 - We also need to block pthread restart signal (32 on all
2801  // supported Linux platforms). Note that LinuxThreads need to block
2802  // this signal for all threads to work properly. So we don't have
2803  // to use hard-coded signal number when setting up the mask.
2804  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2805
2806  if (sigaction(SR_signum, &act, 0) == -1) {
2807    return -1;
2808  }
2809
2810  // Save signal flag
2811  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2812  return 0;
2813}
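
// Illustrative only - the suspend/resume signal can be overridden from the
// shell, subject to the range check in SR_initialize() (the value below is
// just an example):
//
//   export _JAVA_SR_SIGNUM=40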
2814
2815static int SR_finalize() {
2816  return 0;
2817}
2818
2819static int sr_notify(OSThread* osthread) {
2820  int status = pthread_kill(osthread->pthread_id(), SR_signum);
2821  assert_status(status == 0, status, "pthread_kill");
2822  return status;
2823}
2824
2825// "Randomly" selected value for how long we want to spin
2826// before bailing out on suspending a thread, also how often
2827// we send a signal to a thread we want to resume
2828static const int RANDOMLY_LARGE_INTEGER = 1000000;
2829static const int RANDOMLY_LARGE_INTEGER2 = 100;
2830
2831// returns true on success and false on error - really an error is fatal
2832// but this seems the normal response to library errors
2833static bool do_suspend(OSThread* osthread) {
2834  assert(osthread->sr.is_running(), "thread should be running");
2835  // mark as suspended and send signal
2836
2837  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2838    // failed to switch, state wasn't running?
2839    ShouldNotReachHere();
2840    return false;
2841  }
2842
2843  if (sr_notify(osthread) != 0) {
2844    // try to cancel, switch to running
2845
2846    os::SuspendResume::State result = osthread->sr.cancel_suspend();
2847    if (result == os::SuspendResume::SR_RUNNING) {
2848      // cancelled
2849      return false;
2850    } else if (result == os::SuspendResume::SR_SUSPENDED) {
2851      // somehow managed to suspend
2852      return true;
2853    } else {
2854      ShouldNotReachHere();
2855      return false;
2856    }
2857  }
2858
2859  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2860
2861  for (int n = 0; !osthread->sr.is_suspended(); n++) {
2862    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2863      os::naked_yield();
2864    }
2865
2866    // timeout, try to cancel the request
2867    if (n >= RANDOMLY_LARGE_INTEGER) {
2868      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2869      if (cancelled == os::SuspendResume::SR_RUNNING) {
2870        return false;
2871      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2872        return true;
2873      } else {
2874        ShouldNotReachHere();
2875        return false;
2876      }
2877    }
2878  }
2879
2880  guarantee(osthread->sr.is_suspended(), "Must be suspended");
2881  return true;
2882}
2883
2884static void do_resume(OSThread* osthread) {
2885  //assert(osthread->sr.is_suspended(), "thread should be suspended");
2886
2887  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2888    // failed to switch to WAKEUP_REQUEST
2889    ShouldNotReachHere();
2890    return;
2891  }
2892
2893  while (!osthread->sr.is_running()) {
2894    if (sr_notify(osthread) == 0) {
2895      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2896        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2897          os::naked_yield();
2898        }
2899      }
2900    } else {
2901      ShouldNotReachHere();
2902    }
2903  }
2904
2905  guarantee(osthread->sr.is_running(), "Must be running!");
2906}
2907
2908///////////////////////////////////////////////////////////////////////////////////
2909// signal handling (except suspend/resume)
2910
2911// This routine may be used by user applications as a "hook" to catch signals.
2912// The user-defined signal handler must pass unrecognized signals to this
2913// routine, and if it returns true (non-zero), then the signal handler must
2914// return immediately. If the flag "abort_if_unrecognized" is true, then this
2915// routine will never return false (zero), but instead will execute a VM panic
2916// routine that kills the process.
2917//
2918// If this routine returns false, it is OK to call it again. This allows
2919// the user-defined signal handler to perform checks either before or after
2920// the VM performs its own checks. Naturally, the user code would be making
2921// a serious error if it tried to handle an exception (such as a null check
2922// or breakpoint) that the VM was generating for its own correct operation.
2923//
2924// This routine may recognize any of the following kinds of signals:
2925//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2926// It should be consulted by handlers for any of those signals.
2927//
2928// The caller of this routine must pass in the three arguments supplied
2929// to the function referred to in the "sa_sigaction" (not the "sa_handler")
2930// field of the structure passed to sigaction(). This routine assumes that
2931// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2932//
2933// Note that the VM will print warnings if it detects conflicting signal
2934// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2935//
2936extern "C" JNIEXPORT int
2937JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
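
// A sketch of a user-defined hook handler honoring the contract above
// (illustrative only; the handler name is a placeholder):
//
//   void my_hook(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_aix_signal(sig, info, uc, 0 /* !abort_if_unrecognized */)) {
//       return;  // the VM recognized and handled the signal
//     }
//     // ... application-specific handling ...
//   }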
2938
2939// Set thread signal mask (for some reason on AIX sigthreadmask() seems
2940// to be the thing to call; documentation is not terribly clear about whether
2941// pthread_sigmask also works, and if it does, whether it does the same).
2942bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2943  const int rc = ::pthread_sigmask(how, set, oset);
2944  // return value semantics differ slightly for error case:
2945  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
2946  // (so, pthread_sigmask is more thread-safe for error handling).
2947  // But success is always 0.
2948  return rc == 0;
2949}
2950
2951// Function to unblock all signals which are, according
2952// to POSIX, typical program error signals. If they happen while being blocked,
2953// they typically will bring down the process immediately.
2954bool unblock_program_error_signals() {
2955  sigset_t set;
2956  ::sigemptyset(&set);
2957  ::sigaddset(&set, SIGILL);
2958  ::sigaddset(&set, SIGBUS);
2959  ::sigaddset(&set, SIGFPE);
2960  ::sigaddset(&set, SIGSEGV);
2961  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2962}
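
// Hand-installed handlers would typically call this first, mirroring what
// javaSignalHandler() does below (illustrative sketch; the name is a placeholder):
//
//   void some_handler(int sig, siginfo_t* info, void* uc) {
//     unblock_program_error_signals();
//     // ... actual handling ...
//   }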
2963
2964// Renamed from 'signalHandler' to avoid collision with other shared libs.
2965void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2966  assert(info != NULL && uc != NULL, "it must be old kernel");
2967
2968  // Never leave program error signals blocked;
2969  // on all our platforms they would bring down the process immediately when
2970  // getting raised while being blocked.
2971  unblock_program_error_signals();
2972
2973  JVM_handle_aix_signal(sig, info, uc, true);
2974}
2975
2976// This boolean allows users to forward their own non-matching signals
2977// to JVM_handle_aix_signal, harmlessly.
2978bool os::Aix::signal_handlers_are_installed = false;
2979
2980// For signal-chaining
2981struct sigaction sigact[NSIG];
2982sigset_t sigs;
2983bool os::Aix::libjsig_is_loaded = false;
2984typedef struct sigaction *(*get_signal_t)(int);
2985get_signal_t os::Aix::get_signal_action = NULL;
2986
2987struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2988  struct sigaction *actp = NULL;
2989
2990  if (libjsig_is_loaded) {
2991    // Retrieve the old signal handler from libjsig
2992    actp = (*get_signal_action)(sig);
2993  }
2994  if (actp == NULL) {
2995    // Retrieve the preinstalled signal handler from jvm
2996    actp = get_preinstalled_handler(sig);
2997  }
2998
2999  return actp;
3000}
3001
3002static bool call_chained_handler(struct sigaction *actp, int sig,
3003                                 siginfo_t *siginfo, void *context) {
3004  // Call the old signal handler
3005  if (actp->sa_handler == SIG_DFL) {
3006    // It's more reasonable to let jvm treat it as an unexpected exception
3007    // instead of taking the default action.
3008    return false;
3009  } else if (actp->sa_handler != SIG_IGN) {
3010    if ((actp->sa_flags & SA_NODEFER) == 0) {
3011      // automatically block the signal
3012      sigaddset(&(actp->sa_mask), sig);
3013    }
3014
3015    sa_handler_t hand = NULL;
3016    sa_sigaction_t sa = NULL;
3017    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3018    // retrieve the chained handler
3019    if (siginfo_flag_set) {
3020      sa = actp->sa_sigaction;
3021    } else {
3022      hand = actp->sa_handler;
3023    }
3024
3025    if ((actp->sa_flags & SA_RESETHAND) != 0) {
3026      actp->sa_handler = SIG_DFL;
3027    }
3028
3029    // try to honor the signal mask
3030    sigset_t oset;
3031    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3032
3033    // call into the chained handler
3034    if (siginfo_flag_set) {
3035      (*sa)(sig, siginfo, context);
3036    } else {
3037      (*hand)(sig);
3038    }
3039
3040    // restore the signal mask
3041    pthread_sigmask(SIG_SETMASK, &oset, 0);
3042  }
3043  // Tell jvm's signal handler the signal is taken care of.
3044  return true;
3045}
3046
3047bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3048  bool chained = false;
3049  // signal-chaining
3050  if (UseSignalChaining) {
3051    struct sigaction *actp = get_chained_signal_action(sig);
3052    if (actp != NULL) {
3053      chained = call_chained_handler(actp, sig, siginfo, context);
3054    }
3055  }
3056  return chained;
3057}
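
// Precedence implemented above, in summary: with UseSignalChaining, a handler
// recorded by libjsig wins over one the VM saved itself (the preinstalled
// handler). A chained SIG_DFL is reported as "not chained", so the VM treats
// the signal as an unexpected exception; a chained SIG_IGN is treated as
// handled and simply swallows the signal.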
3058
3059struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3060  if (sigismember(&sigs, sig)) {
3061    return &sigact[sig];
3062  }
3063  return NULL;
3064}
3065
3066void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3067  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3068  sigact[sig] = oldAct;
3069  sigaddset(&sigs, sig);
3070}
3071
3072// for diagnostic
3073int sigflags[NSIG];
3074
3075int os::Aix::get_our_sigflags(int sig) {
3076  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3077  return sigflags[sig];
3078}
3079
3080void os::Aix::set_our_sigflags(int sig, int flags) {
3081  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3082  if (sig > 0 && sig < NSIG) {
3083    sigflags[sig] = flags;
3084  }
3085}
3086
3087void os::Aix::set_signal_handler(int sig, bool set_installed) {
3088  // Check for overwrite.
3089  struct sigaction oldAct;
3090  sigaction(sig, (struct sigaction*)NULL, &oldAct);
3091
3092  void* oldhand = oldAct.sa_sigaction
3093    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3094    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3095  // Renamed 'signalHandler' to avoid collision with other shared libs.
3096  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3097      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3098      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3099    if (AllowUserSignalHandlers || !set_installed) {
3100      // Do not overwrite; user takes responsibility to forward to us.
3101      return;
3102    } else if (UseSignalChaining) {
3103      // save the old handler in jvm
3104      save_preinstalled_handler(sig, oldAct);
3105      // libjsig also interposes the sigaction() call below and saves the
3106      // old sigaction on its own.
3107    } else {
3108      fatal("Encountered unexpected pre-existing sigaction handler "
3109            "%#lx for signal %d.", (long)oldhand, sig);
3110    }
3111  }
3112
3113  struct sigaction sigAct;
3114  sigfillset(&(sigAct.sa_mask));
3115  if (!set_installed) {
3116    sigAct.sa_handler = SIG_DFL;
3117    sigAct.sa_flags = SA_RESTART;
3118  } else {
3119    // Renamed 'signalHandler' to avoid collision with other shared libs.
3120    sigAct.sa_sigaction = javaSignalHandler;
3121    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3122  }
3123  // Save flags, which are set by ours
3124  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3125  sigflags[sig] = sigAct.sa_flags;
3126
3127  int ret = sigaction(sig, &sigAct, &oldAct);
3128  assert(ret == 0, "check");
3129
3130  void* oldhand2 = oldAct.sa_sigaction
3131                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3132                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3133  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3134}
3135
3136// install signal handlers for signals that HotSpot needs to
3137// handle in order to support Java-level exception handling.
3138void os::Aix::install_signal_handlers() {
3139  if (!signal_handlers_are_installed) {
3140    signal_handlers_are_installed = true;
3141
3142    // signal-chaining
3143    typedef void (*signal_setting_t)();
3144    signal_setting_t begin_signal_setting = NULL;
3145    signal_setting_t end_signal_setting = NULL;
3146    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3147                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3148    if (begin_signal_setting != NULL) {
3149      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3150                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3151      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3152                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3153      libjsig_is_loaded = true;
3154      assert(UseSignalChaining, "should enable signal-chaining");
3155    }
3156    if (libjsig_is_loaded) {
3157      // Tell libjsig jvm is setting signal handlers.
3158      (*begin_signal_setting)();
3159    }
3160
3161    ::sigemptyset(&sigs);
3162    set_signal_handler(SIGSEGV, true);
3163    set_signal_handler(SIGPIPE, true);
3164    set_signal_handler(SIGBUS, true);
3165    set_signal_handler(SIGILL, true);
3166    set_signal_handler(SIGFPE, true);
3167    set_signal_handler(SIGTRAP, true);
3168    set_signal_handler(SIGXFSZ, true);
3169    set_signal_handler(SIGDANGER, true);
3170
3171    if (libjsig_is_loaded) {
3172      // Tell libjsig jvm finishes setting signal handlers.
3173      (*end_signal_setting)();
3174    }
3175
3176    // We don't activate the signal checker if libjsig is in place; we trust
3177    // ourselves, and if a UserSignalHandler is installed all bets are off.
3178    // Log that signal checking is off only if -verbose:jni is specified.
3179    if (CheckJNICalls) {
3180      if (libjsig_is_loaded) {
3181        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3182        check_signals = false;
3183      }
3184      if (AllowUserSignalHandlers) {
3185        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3186        check_signals = false;
3187      }
3188      // Need to initialize check_signal_done.
3189      ::sigemptyset(&check_signal_done);
3190    }
3191  }
3192}
3193
3194static const char* get_signal_handler_name(address handler,
3195                                           char* buf, int buflen) {
3196  int offset;
3197  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3198  if (found) {
3199    // skip directory names
3200    const char *p1, *p2;
3201    p1 = buf;
3202    size_t len = strlen(os::file_separator());
3203    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3204    // The way os::dll_address_to_library_name is implemented on Aix
3205    // right now, it always returns -1 for the offset, which is not
3206    // terribly informative.
3207    // Until that is fixed, omit the offset.
3208    jio_snprintf(buf, buflen, "%s", p1);
3209  } else {
3210    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3211  }
3212  return buf;
3213}
3214
3215static void print_signal_handler(outputStream* st, int sig,
3216                                 char* buf, size_t buflen) {
3217  struct sigaction sa;
3218  sigaction(sig, NULL, &sa);
3219
3220  st->print("%s: ", os::exception_name(sig, buf, buflen));
3221
3222  address handler = (sa.sa_flags & SA_SIGINFO)
3223    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3224    : CAST_FROM_FN_PTR(address, sa.sa_handler);
3225
3226  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3227    st->print("SIG_DFL");
3228  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3229    st->print("SIG_IGN");
3230  } else {
3231    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3232  }
3233
3234  // Print readable mask.
3235  st->print(", sa_mask[0]=");
3236  os::Posix::print_signal_set_short(st, &sa.sa_mask);
3237
3238  address rh = VMError::get_resetted_sighandler(sig);
3239  // The handler may have been reset by VMError.
3240  if (rh != NULL) {
3241    handler = rh;
3242    sa.sa_flags = VMError::get_resetted_sigflags(sig);
3243  }
3244
3245  // Print textual representation of sa_flags.
3246  st->print(", sa_flags=");
3247  os::Posix::print_sa_flags(st, sa.sa_flags);
3248
3249  // Check: is it our handler?
3250  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3251      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3252    // It is our signal handler.
3253    // Check whether the flags still match the ones we installed.
3254    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3255      st->print(", flags were changed from " PTR32_FORMAT ", consider using the jsig library",
3256                os::Aix::get_our_sigflags(sig));
3257    }
3258  }
3259  st->cr();
3260}
3261
3262#define DO_SIGNAL_CHECK(sig) \
3263  if (!sigismember(&check_signal_done, sig)) \
3264    os::Aix::check_signal_handler(sig)
3265
3266// This method is a periodic task to check for misbehaving JNI applications
3267// under CheckJNI; we can add any other periodic checks here.
3268
3269void os::run_periodic_checks() {
3270
3271  if (check_signals == false) return;
3272
3273  // SEGV and BUS, if overridden, could potentially prevent the
3274  // generation of hs*.log in the event of a crash. Debugging
3275  // such a case can be very challenging, so we absolutely
3276  // check the following for good measure:
3277  DO_SIGNAL_CHECK(SIGSEGV);
3278  DO_SIGNAL_CHECK(SIGILL);
3279  DO_SIGNAL_CHECK(SIGFPE);
3280  DO_SIGNAL_CHECK(SIGBUS);
3281  DO_SIGNAL_CHECK(SIGPIPE);
3282  DO_SIGNAL_CHECK(SIGXFSZ);
3283  if (UseSIGTRAP) {
3284    DO_SIGNAL_CHECK(SIGTRAP);
3285  }
3286  DO_SIGNAL_CHECK(SIGDANGER);
3287
3288  // ReduceSignalUsage allows the user to override these handlers
3289  // see comments at the very top and in jvm_solaris.h.
3290  if (!ReduceSignalUsage) {
3291    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3292    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3293    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3294    DO_SIGNAL_CHECK(BREAK_SIGNAL);
3295  }
3296
3297  DO_SIGNAL_CHECK(SR_signum);
3298}
3299
3300typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3301
3302static os_sigaction_t os_sigaction = NULL;
3303
3304void os::Aix::check_signal_handler(int sig) {
3305  char buf[O_BUFLEN];
3306  address jvmHandler = NULL;
3307
3308  struct sigaction act;
3309  if (os_sigaction == NULL) {
3310    // only trust the default sigaction, in case it has been interposed
3311    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3312    if (os_sigaction == NULL) return;
3313  }
3314
3315  os_sigaction(sig, (struct sigaction*)NULL, &act);
3316
3317  address thisHandler = (act.sa_flags & SA_SIGINFO)
3318    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3319    : CAST_FROM_FN_PTR(address, act.sa_handler);
3320
3321  switch(sig) {
3322  case SIGSEGV:
3323  case SIGBUS:
3324  case SIGFPE:
3325  case SIGPIPE:
3326  case SIGILL:
3327  case SIGXFSZ:
3328    // Renamed 'signalHandler' to avoid collision with other shared libs.
3329    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3330    break;
3331
3332  case SHUTDOWN1_SIGNAL:
3333  case SHUTDOWN2_SIGNAL:
3334  case SHUTDOWN3_SIGNAL:
3335  case BREAK_SIGNAL:
3336    jvmHandler = (address)user_handler();
3337    break;
3338
3339  default:
3340    if (sig == SR_signum) {
3341      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3342    } else {
3343      return;
3344    }
3345    break;
3346  }
3347
3348  if (thisHandler != jvmHandler) {
3349    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3350    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3351    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3352    // No need to check this sig any longer
3353    sigaddset(&check_signal_done, sig);
3354    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN.
3355    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3356      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3357                    exception_name(sig, buf, O_BUFLEN));
3358    }
3359  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3360    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3361    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3362    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3363    // No need to check this sig any longer
3364    sigaddset(&check_signal_done, sig);
3365  }
3366
3367  // Dump all the signal handlers once checking of this signal is done.
3368  if (sigismember(&check_signal_done, sig)) {
3369    print_signal_handlers(tty, buf, O_BUFLEN);
3370  }
3371}
3372
3373// To install functions for atexit system call
3374extern "C" {
3375  static void perfMemory_exit_helper() {
3376    perfMemory_exit();
3377  }
3378}
3379
3380// This is called _before_ most of the global arguments have been parsed.
3381void os::init(void) {
3382  // This is basic; we want to know if that ever changes.
3383  // (The shared memory boundary is supposed to be 256M aligned.)
3384  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3385
3386  // First off, we need to know whether we run on AIX or PASE, and
3387  // the OS level we run on.
3388  os::Aix::initialize_os_info();
3389
3390  // Scan environment (SPEC1170 behaviour, etc).
3391  os::Aix::scan_environment();
3392
3393  // Check which pages are supported by AIX.
3394  query_multipage_support();
3395
3396  // Act like we only have one page size by eliminating corner cases which
3397  // we did not support very well anyway.
3398  // We have two input conditions:
3399  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3400  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3401  //    setting.
3402  //    Data segment page size is important for us because it defines the thread stack page
3403  //    size, which is needed for guard page handling, stack banging etc.
3404  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3405  //    and should be allocated with 64k pages.
3406  //
3407  // So, we do the following:
3408  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3409  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3410  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3411  // 64k          no              --- AIX 5.2 ? ---
3412  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3413
3414  // We explicitly leave no option to change page size, because only upgrading would work,
3415  // not downgrading (if the stack page size is 64k you cannot pretend it's 4k).
3416
3417  if (g_multipage_support.datapsize == SIZE_4K) {
3418    // datapsize = 4K. Data segment, thread stacks are 4K paged.
3419    if (g_multipage_support.can_use_64K_pages) {
3420      // .. but we are able to use 64K pages dynamically.
3421      // This would be typical for java launchers which are not linked
3422      // with datapsize=64K (like, any other launcher but our own).
3423      //
3424      // In this case it would be smart to allocate the java heap with 64K
3425      // to get the performance benefit, and to fake 64k pages for the
3426      // data segment (when dealing with thread stacks).
3427      //
3428      // However, leave a possibility to downgrade to 4K, using
3429      // -XX:-Use64KPages.
3430      if (Use64KPages) {
3431        trcVerbose("64K page mode (faked for data segment)");
3432        Aix::_page_size = SIZE_64K;
3433      } else {
3434        trcVerbose("4K page mode (Use64KPages=off)");
3435        Aix::_page_size = SIZE_4K;
3436      }
3437    } else {
3438      // .. and not able to allocate 64k pages dynamically. Here, just
3439      // fall back to 4K paged mode and use mmap for everything.
3440      trcVerbose("4K page mode");
3441      Aix::_page_size = SIZE_4K;
3442      FLAG_SET_ERGO(bool, Use64KPages, false);
3443    }
3444  } else {
3445    // datapsize = 64k. Data segment, thread stacks are 64k paged.
3446    //   This normally means that we can allocate 64k pages dynamically.
3447  //   (There is one special case where this may be false: EXTSHM=on,
3448  //    but we decided not to support that mode.)
3449    assert0(g_multipage_support.can_use_64K_pages);
3450    Aix::_page_size = SIZE_64K;
3451    trcVerbose("64K page mode");
3452    FLAG_SET_ERGO(bool, Use64KPages, true);
3453  }
3454
3455  // Hard-wire the stack page size to the base page size; if that works out,
3456  // we can remove the separate stack page size altogether.
3457  Aix::_stack_page_size = Aix::_page_size;
3458
3459  // For now UseLargePages is just ignored.
3460  FLAG_SET_ERGO(bool, UseLargePages, false);
3461  _page_sizes[0] = 0;
3462
3463  // debug trace
3464  trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3465
3466  // Next, we need to initialize libo4 and libperfstat libraries.
3467  if (os::Aix::on_pase()) {
3468    os::Aix::initialize_libo4();
3469  } else {
3470    os::Aix::initialize_libperfstat();
3471  }
3472
3473  // Reset the perfstat information provided by ODM.
3474  if (os::Aix::on_aix()) {
3475    libperfstat::perfstat_reset();
3476  }
3477
3478  // Now initialize basic system properties. Note that for some of the values we
3479  // need libperfstat etc.
3480  os::Aix::initialize_system_info();
3481
3482  _initial_pid = getpid();
3483
3484  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3485
3486  init_random(1234567);
3487
3488  ThreadCritical::initialize();
3489
3490  // Main_thread points to the aboriginal thread.
3491  Aix::_main_thread = pthread_self();
3492
3493  initial_time_count = os::elapsed_counter();
3494
3495  // If the pagesize of the VM is greater than 8K determine the appropriate
3496  // number of initial guard pages. The user can change this with the
3497  // command line arguments, if needed.
3498  if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3499    StackYellowPages = 1;
3500    StackRedPages = 1;
3501    StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3502  }
3503}
3504
3505// This is called _after_ the global arguments have been parsed.
3506jint os::init_2(void) {
3507
3508  trcVerbose("processor count: %d", os::_processor_count);
3509  trcVerbose("physical memory: %lu", Aix::_physical_memory);
3510
3511  // Initially build up the loaded dll map.
3512  LoadedLibraries::reload();
3513
3514  const int page_size = Aix::page_size();
3515  const int map_size = page_size;
3516
3517  address map_address = (address) MAP_FAILED;
3518  const int prot  = PROT_READ;
3519  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3520
3521  // Use optimized addresses for the polling page,
3522  // e.g. map it to a special 32-bit address.
3523  if (OptimizePollingPageLocation) {
3524    // architecture-specific list of address wishes:
3525    address address_wishes[] = {
3526      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3527      // PPC64: all address wishes are non-negative 32 bit values where
3528      // the lower 16 bits are all zero. We can load these addresses
3529      // with a single ppc_lis instruction.
3530      (address) 0x30000000, (address) 0x31000000,
3531      (address) 0x32000000, (address) 0x33000000,
3532      (address) 0x40000000, (address) 0x41000000,
3533      (address) 0x42000000, (address) 0x43000000,
3534      (address) 0x50000000, (address) 0x51000000,
3535      (address) 0x52000000, (address) 0x53000000,
3536      (address) 0x60000000, (address) 0x61000000,
3537      (address) 0x62000000, (address) 0x63000000
3538    };
3539    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3540
3541    // iterate over the list of address wishes:
3542    for (int i=0; i<address_wishes_length; i++) {
3543      // Try to map with current address wish.
3544      // AIX: AIX needs MAP_FIXED if we provide an address, and mmap will
3545      // fail if the address is already mapped.
3546      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3547                                     map_size, prot,
3548                                     flags | MAP_FIXED,
3549                                     -1, 0);
3550      if (Verbose) {
3551        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3552                address_wishes[i], map_address + (ssize_t)page_size);
3553      }
3554
3555      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3556        // Map succeeded and map_address is at wished address, exit loop.
3557        break;
3558      }
3559
3560      if (map_address != (address) MAP_FAILED) {
3561        // Map succeeded, but polling_page is not at wished address, unmap and continue.
3562        ::munmap(map_address, map_size);
3563        map_address = (address) MAP_FAILED;
3564      }
3565      // Map failed, continue loop.
3566    }
3567  } // end OptimizePollingPageLocation
3568
3569  if (map_address == (address) MAP_FAILED) {
3570    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3571  }
3572  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3573  os::set_polling_page(map_address);
3574
3575  if (!UseMembar) {
3576    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3577    guarantee(mem_serialize_page != (address)MAP_FAILED, "mmap failed for memory serialize page");
3578    os::set_memory_serialize_page(mem_serialize_page);
3579
3580#ifndef PRODUCT
3581    if (Verbose && PrintMiscellaneous) {
3582      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3583    }
3584#endif
3585  }
3586
3587  // initialize suspend/resume support - must do this before signal_sets_init()
3588  if (SR_initialize() != 0) {
3589    perror("SR_initialize failed");
3590    return JNI_ERR;
3591  }
3592
3593  Aix::signal_sets_init();
3594  Aix::install_signal_handlers();
3595
3596  // Check the minimum allowable stack size for thread creation and for
3597  // initializing the java system classes, including StackOverflowError - this
3598  // depends on the page size. Add a page for compiler2 recursion in the main thread.
3599  // Add in 2*BytesPerWord times page size to account for VM stack during
3600  // class initialization depending on 32 or 64 bit VM.
3601  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3602            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3603                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3604
3605  os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3606
3607  size_t threadStackSizeInBytes = ThreadStackSize * K;
3608  if (threadStackSizeInBytes != 0 &&
3609      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3610    tty->print_cr("\nThe stack size specified is too small. "
3611                  "Specify at least %dk.",
3612                  os::Aix::min_stack_allowed / K);
3613    return JNI_ERR;
3614  }
3615
3616  // Make the stack size a multiple of the page size so that
3617  // the yellow/red zones can be guarded.
3618  // Note that this can be 0, if no default stacksize was set.
3619  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
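  // Worked example: with ThreadStackSize=100 (i.e. 100K) and a 64K page size,
  // round_to(100K, 64K) yields 128K, so the yellow/red guard zones fall on
  // page boundaries.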
3620
3621  Aix::libpthread_init();
3622
3623  if (MaxFDLimit) {
3624    // Set the number of file descriptors to the maximum. Print an error
3625    // if getrlimit/setrlimit fails, but continue regardless.
3626    struct rlimit nbr_files;
3627    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3628    if (status != 0) {
3629      if (PrintMiscellaneous && (Verbose || WizardMode))
3630        perror("os::init_2 getrlimit failed");
3631    } else {
3632      nbr_files.rlim_cur = nbr_files.rlim_max;
3633      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3634      if (status != 0) {
3635        if (PrintMiscellaneous && (Verbose || WizardMode))
3636          perror("os::init_2 setrlimit failed");
3637      }
3638    }
3639  }
3640
3641  if (PerfAllowAtExitRegistration) {
3642    // Only register atexit functions if PerfAllowAtExitRegistration is set.
3643    // Atexit functions can be delayed until process exit time, which
3644    // can be problematic for embedded VM situations. Embedded VMs should
3645    // call DestroyJavaVM() to assure that VM resources are released.
3646
3647    // Note: perfMemory_exit_helper atexit function may be removed in
3648    // the future if the appropriate cleanup code can be added to the
3649    // VM_Exit VMOperation's doit method.
3650    if (atexit(perfMemory_exit_helper) != 0) {
3651      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3652    }
3653  }
3654
3655  return JNI_OK;
3656}
3657
3658// Mark the polling page as unreadable
3659void os::make_polling_page_unreadable(void) {
3660  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3661    fatal("Could not disable polling page");
3662  }
3663};
3664
3665// Mark the polling page as readable
3666void os::make_polling_page_readable(void) {
3667  // Changed according to os_linux.cpp.
3668  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3669    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3670  }
3671};
3672
3673int os::active_processor_count() {
3674  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3675  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3676  return online_cpus;
3677}
3678
3679void os::set_native_thread_name(const char *name) {
3680  // Not yet implemented.
3681  return;
3682}
3683
3684bool os::distribute_processes(uint length, uint* distribution) {
3685  // Not yet implemented.
3686  return false;
3687}
3688
3689bool os::bind_to_processor(uint processor_id) {
3690  // Not yet implemented.
3691  return false;
3692}
3693
3694void os::SuspendedThreadTask::internal_do_task() {
3695  if (do_suspend(_thread->osthread())) {
3696    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3697    do_task(context);
3698    do_resume(_thread->osthread());
3699  }
3700}
3701
3702class PcFetcher : public os::SuspendedThreadTask {
3703public:
3704  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3705  ExtendedPC result();
3706protected:
3707  void do_task(const os::SuspendedThreadTaskContext& context);
3708private:
3709  ExtendedPC _epc;
3710};
3711
3712ExtendedPC PcFetcher::result() {
3713  guarantee(is_done(), "task is not done yet.");
3714  return _epc;
3715}
3716
3717void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3718  Thread* thread = context.thread();
3719  OSThread* osthread = thread->osthread();
3720  if (osthread->ucontext() != NULL) {
3721    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3722  } else {
3723    // NULL context is unexpected, double-check this is the VMThread.
3724    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3725  }
3726}
3727
3728// Suspends the target using the signal mechanism and then grabs the PC before
3729// resuming the target. Used by the flat-profiler only.
3730ExtendedPC os::get_thread_pc(Thread* thread) {
3731  // Make sure that it is called by the watcher for the VMThread.
3732  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3733  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3734
3735  PcFetcher fetcher(thread);
3736  fetcher.run();
3737  return fetcher.result();
3738}
3739
3740////////////////////////////////////////////////////////////////////////////////
3741// debug support
3742
3743static address same_page(address x, address y) {
3744  intptr_t page_bits = -os::vm_page_size();
3745  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3746    return x;
3747  else if (x > y)
3748    return (address)(intptr_t(y) | ~page_bits) + 1;
3749  else
3750    return (address)(intptr_t(y) & page_bits);
3751}
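// Worked example for same_page (assuming 4K pages, so page_bits == ~0xFFF):
// x == 0x30000F00 and y == 0x30000010 share the page 0x30000000..0x30000FFF,
// so x is returned unchanged. If x lies in a higher page than y, the
// exclusive end of y's page ((y | 0xFFF) + 1) is returned; if x lies in a
// lower page, the start of y's page (y & ~0xFFF) is returned.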
3752
3753bool os::find(address addr, outputStream* st) {
3754
3755  st->print(PTR_FORMAT ": ", addr);
3756
3757  loaded_module_t lm;
3758  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3759      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3760    st->print("%s", lm.path);
3761    return true;
3762  }
3763
3764  return false;
3765}
3766
3767////////////////////////////////////////////////////////////////////////////////
3768// misc
3769
3770// This does not do anything on Aix. This is basically a hook for being
3771// able to use structured exception handling (thread-local exception filters)
3772// on, e.g., Win32.
3773void
3774os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3775                         JavaCallArguments* args, Thread* thread) {
3776  f(value, method, args, thread);
3777}
3778
3779void os::print_statistics() {
3780}
3781
3782bool os::message_box(const char* title, const char* message) {
3783  int i;
3784  fdStream err(defaultStream::error_fd());
3785  for (i = 0; i < 78; i++) err.print_raw("=");
3786  err.cr();
3787  err.print_raw_cr(title);
3788  for (i = 0; i < 78; i++) err.print_raw("-");
3789  err.cr();
3790  err.print_raw_cr(message);
3791  for (i = 0; i < 78; i++) err.print_raw("=");
3792  err.cr();
3793
3794  char buf[16];
3795  // If read keeps failing, retry with a sleep so we neither exit nor spin on CPU.
3796  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3797
3798  return buf[0] == 'y' || buf[0] == 'Y';
3799}
3800
3801int os::stat(const char *path, struct stat *sbuf) {
3802  char pathbuf[MAX_PATH];
3803  if (strlen(path) > MAX_PATH - 1) {
3804    errno = ENAMETOOLONG;
3805    return -1;
3806  }
3807  os::native_path(strcpy(pathbuf, path));
3808  return ::stat(pathbuf, sbuf);
3809}
3810
3811bool os::check_heap(bool force) {
3812  return true;
3813}
3814
3815// Is a (classpath) directory empty?
3816bool os::dir_is_empty(const char* path) {
3817  DIR *dir = NULL;
3818  struct dirent *ptr;
3819
3820  dir = opendir(path);
3821  if (dir == NULL) return true;
3822
3823  /* Scan the directory */
3824  bool result = true;
3825  char buf[sizeof(struct dirent) + MAX_PATH];
3826  while (result && (ptr = ::readdir(dir)) != NULL) {
3827    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3828      result = false;
3829    }
3830  }
3831  closedir(dir);
3832  return result;
3833}
3834
3835// This code originates from JDK's sysOpen and open64_w
3836// from src/solaris/hpi/src/system_md.c
3837
3838int os::open(const char *path, int oflag, int mode) {
3839
3840  if (strlen(path) > MAX_PATH - 1) {
3841    errno = ENAMETOOLONG;
3842    return -1;
3843  }
3844  int fd;
3845
3846  fd = ::open64(path, oflag, mode);
3847  if (fd == -1) return -1;
3848
3849  // If the open succeeded, the file might still be a directory.
3850  {
3851    struct stat64 buf64;
3852    int ret = ::fstat64(fd, &buf64);
3853    int st_mode = buf64.st_mode;
3854
3855    if (ret != -1) {
3856      if ((st_mode & S_IFMT) == S_IFDIR) {
3857        errno = EISDIR;
3858        ::close(fd);
3859        return -1;
3860      }
3861    } else {
3862      ::close(fd);
3863      return -1;
3864    }
3865  }
3866
3867  // All file descriptors that are opened in the JVM and not
3868  // specifically destined for a subprocess should have the
3869  // close-on-exec flag set. If we don't set it, then careless 3rd
3870  // party native code might fork and exec without closing all
3871  // appropriate file descriptors (e.g. as we do in closeDescriptors in
3872  // UNIXProcess.c), and this in turn might:
3873  //
3874  // - cause end-of-file to fail to be detected on some file
3875  //   descriptors, resulting in mysterious hangs, or
3876  //
3877  // - might cause an fopen in the subprocess to fail on a system
3878  //   suffering from bug 1085341.
3879  //
3880  // (Yes, the default setting of the close-on-exec flag is a Unix
3881  // design flaw.)
3882  //
3883  // See:
3884  // 1085341: 32-bit stdio routines should support file descriptors >255
3885  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3886  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3887#ifdef FD_CLOEXEC
3888  {
3889    int flags = ::fcntl(fd, F_GETFD);
3890    if (flags != -1)
3891      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3892  }
3893#endif
3894
3895  return fd;
3896}
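// Side note, a minimal sketch (not part of the build): on platforms whose
// open() supports the POSIX.1-2008 O_CLOEXEC flag (availability on any given
// AIX level is an assumption here), the window between open() and fcntl()
// above can be closed atomically.
#if 0
static int open_cloexec_sketch(const char* path, int oflag, int mode) {
#ifdef O_CLOEXEC
  // Atomically set close-on-exec at open time; no race with fork/exec.
  return ::open64(path, oflag | O_CLOEXEC, mode);
#else
  // Fallback: the same two-step pattern as os::open above.
  int fd = ::open64(path, oflag, mode);
  if (fd != -1) {
    int flags = ::fcntl(fd, F_GETFD);
    if (flags != -1) {
      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
  }
  return fd;
#endif
}
#endif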
3897
3898// create binary file, rewriting existing file if required
3899int os::create_binary_file(const char* path, bool rewrite_existing) {
3900  int oflags = O_WRONLY | O_CREAT;
3901  if (!rewrite_existing) {
3902    oflags |= O_EXCL;
3903  }
3904  return ::open64(path, oflags, S_IREAD | S_IWRITE);
3905}
3906
3907// return current position of file pointer
3908jlong os::current_file_offset(int fd) {
3909  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3910}
3911
3912// move file pointer to the specified offset
3913jlong os::seek_to_file_offset(int fd, jlong offset) {
3914  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3915}
3916
3917// This code originates from JDK's sysAvailable
3918// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3919
3920int os::available(int fd, jlong *bytes) {
3921  jlong cur, end;
3922  int mode;
3923  struct stat64 buf64;
3924
3925  if (::fstat64(fd, &buf64) >= 0) {
3926    mode = buf64.st_mode;
3927    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3928      int n;
3929      if (::ioctl(fd, FIONREAD, &n) >= 0) {
3930        *bytes = n;
3931        return 1;
3932      }
3933    }
3934  }
3935  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3936    return 0;
3937  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3938    return 0;
3939  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3940    return 0;
3941  }
3942  *bytes = end - cur;
3943  return 1;
3944}
3945
3946// Map a block of memory.
3947char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3948                        char *addr, size_t bytes, bool read_only,
3949                        bool allow_exec) {
3950  int prot;
3951  int flags = MAP_PRIVATE;
3952
3953  if (read_only) {
3954    prot = PROT_READ;
3955    flags = MAP_SHARED;
3956  } else {
3957    prot = PROT_READ | PROT_WRITE;
3958    flags = MAP_PRIVATE;
3959  }
3960
3961  if (allow_exec) {
3962    prot |= PROT_EXEC;
3963  }
3964
3965  if (addr != NULL) {
3966    flags |= MAP_FIXED;
3967  }
3968
3969  // Allow anonymous mappings if 'fd' is -1.
3970  if (fd == -1) {
3971    flags |= MAP_ANONYMOUS;
3972  }
3973
3974  char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3975                                     fd, file_offset);
3976  if (mapped_address == MAP_FAILED) {
3977    return NULL;
3978  }
3979  return mapped_address;
3980}
3981
3982// Remap a block of memory.
3983char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3984                          char *addr, size_t bytes, bool read_only,
3985                          bool allow_exec) {
3986  // same as map_memory() on this OS
3987  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3988                        allow_exec);
3989}
3990
3991// Unmap a block of memory.
3992bool os::pd_unmap_memory(char* addr, size_t bytes) {
3993  return munmap(addr, bytes) == 0;
3994}
3995
3996// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3997// are used by JVM M&M and JVMTI to get user+sys or user CPU time
3998// of a thread.
3999//
4000// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4001// the fast estimate available on the platform.
4002
4003jlong os::current_thread_cpu_time() {
4004  // return user + sys since the cost is the same
4005  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4006  assert(n >= 0, "negative CPU time");
4007  return n;
4008}
4009
4010jlong os::thread_cpu_time(Thread* thread) {
4011  // consistent with what current_thread_cpu_time() returns
4012  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4013  assert(n >= 0, "negative CPU time");
4014  return n;
4015}
4016
4017jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4018  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4019  assert(n >= 0, "negative CPU time");
4020  return n;
4021}
4022
4023static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4024  bool error = false;
4025
4026  jlong sys_time = 0;
4027  jlong user_time = 0;
4028
4029  // Reimplemented using getthrds64().
4030  //
4031  // Works like this:
4032  // For the thread in question, get the kernel thread id. Then get the
4033  // kernel thread statistics using that id.
4034  //
4035  // Of course this only works when no m:n pthread scheduling is used,
4036  // i.e. when there is a 1:1 relationship between user and kernel threads.
4037  // On AIX, see the AIXTHREAD_SCOPE variable.
4038
4039  pthread_t pthtid = thread->osthread()->pthread_id();
4040
4041  // retrieve kernel thread id for the pthread:
4042  tid64_t tid = 0;
4043  struct __pthrdsinfo pinfo;
4044  // I just love those otherworldly IBM APIs which force me to hand down
4045  // dummy buffers for stuff I don't care about...
4046  char dummy[1];
4047  int dummy_size = sizeof(dummy);
4048  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4049                          dummy, &dummy_size) == 0) {
4050    tid = pinfo.__pi_tid;
4051  } else {
4052    tty->print_cr("pthread_getthrds_np failed.");
4053    error = true;
4054  }
4055
4056  // retrieve kernel timing info for that kernel thread
4057  if (!error) {
4058    struct thrdentry64 thrdentry;
4059    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4060      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4061      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4062    } else {
4063      tty->print_cr("getthrds64 failed.");
4064      error = true;
4065    }
4066  }
4067
4068  if (p_sys_time) {
4069    *p_sys_time = sys_time;
4070  }
4071
4072  if (p_user_time) {
4073    *p_user_time = user_time;
4074  }
4075
4076  if (error) {
4077    return false;
4078  }
4079
4080  return true;
4081}
4082
4083jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4084  jlong sys_time;
4085  jlong user_time;
4086
4087  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4088    return -1;
4089  }
4090
4091  return user_sys_cpu_time ? sys_time + user_time : user_time;
4092}
4093
4094void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4095  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4096  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4097  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4098  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4099}
4100
4101void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4102  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4103  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4104  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4105  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4106}
4107
4108bool os::is_thread_cpu_time_supported() {
4109  return true;
4110}
4111
4112// System loadavg support. Returns -1 if load average cannot be obtained.
4113// For now just return the system wide load average (no processor sets).
4114int os::loadavg(double values[], int nelem) {
4115
4116  // Implemented using libperfstat on AIX.
4117
4118  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4119  guarantee(values, "argument error");
4120
4121  if (os::Aix::on_pase()) {
4122    Unimplemented();
4123    return -1;
4124  } else {
4125    // AIX: use libperfstat
4126    //
4127    // See also:
4128    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4129    // /usr/include/libperfstat.h:
4130
4131    // Use the AIX-version-independent get_cpuinfo.
4132    os::Aix::cpuinfo_t ci;
4133    if (os::Aix::get_cpuinfo(&ci)) {
4134      for (int i = 0; i < nelem; i++) {
4135        values[i] = ci.loadavg[i];
4136      }
4137    } else {
4138      return -1;
4139    }
4140    return nelem;
4141  }
4142}
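// Usage sketch (illustrative, not part of the build): querying the 1-, 5-
// and 15-minute load averages via the function above.
#if 0
static void loadavg_example() {
  double loads[3];
  if (os::loadavg(loads, 3) == 3) {
    tty->print_cr("load averages: %.2f %.2f %.2f",
                  loads[0], loads[1], loads[2]);
  }
}
#endif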
4143
4144void os::pause() {
4145  char filename[MAX_PATH];
4146  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4147    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4148  } else {
4149    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4150  }
4151
4152  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4153  if (fd != -1) {
4154    struct stat buf;
4155    ::close(fd);
4156    while (::stat(filename, &buf) == 0) {
4157      (void)::poll(NULL, 0, 100);
4158    }
4159  } else {
4160    jio_fprintf(stderr,
4161      "Could not open pause file '%s', continuing immediately.\n", filename);
4162  }
4163}
4164
4165bool os::Aix::is_primordial_thread() {
4166  if (pthread_self() == (pthread_t)1) {
4167    return true;
4168  } else {
4169    return false;
4170  }
4171}
4172
4173// OS recognition (PASE/AIX, OS level). Call this before calling any
4174// of the static functions Aix::on_pase() and Aix::os_version().
4175void os::Aix::initialize_os_info() {
4176
4177  assert(_on_pase == -1 && _os_version == -1, "already called.");
4178
4179  struct utsname uts;
4180  memset(&uts, 0, sizeof(uts));
4181  strcpy(uts.sysname, "?");
4182  if (::uname(&uts) == -1) {
4183    trc("uname failed (%d)", errno);
4184    guarantee(0, "Could not determine whether we run on AIX or PASE");
4185  } else {
4186    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4187               "node \"%s\" machine \"%s\"\n",
4188               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4189    const int major = atoi(uts.version);
4190    assert(major > 0, "invalid OS version");
4191    const int minor = atoi(uts.release);
4192    assert(minor > 0, "invalid OS release");
4193    _os_version = (major << 8) | minor;
4194    if (strcmp(uts.sysname, "OS400") == 0) {
4195      Unimplemented();
4196    } else if (strcmp(uts.sysname, "AIX") == 0) {
4197      // We run on AIX. We do not support versions older than AIX 5.3.
4198      _on_pase = 0;
4199      if (_os_version < 0x0503) {
4200        trc("AIX release older than AIX 5.3 not supported.");
4201        assert(false, "AIX release too old.");
4202      } else {
4203        trcVerbose("We run on AIX %d.%d\n", major, minor);
4204      }
4205    } else {
4206      assert(false, "unknown OS");
4207    }
4208  }
4209
4210  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4211} // end: os::Aix::initialize_os_info()
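// Worked example of the version encoding above (illustrative): AIX 6.1
// reports uts.version == "6" and uts.release == "1", which is encoded as
// (6 << 8) | 1 == 0x0601; a check like (os_version() >= 0x0601) then means
// "AIX 6.1 or newer".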
4212
4213// Scan the environment for important settings which might affect the VM.
4214// Trace out settings. Warn about invalid settings and/or correct them.
4215//
4216// Must run after os::Aix::initialize_os_info().
4217void os::Aix::scan_environment() {
4218
4219  char* p;
4220  int rc;
4221
4222  // Warn explicitly if EXTSHM=ON is used. That switch changes how
4223  // System V shared memory behaves. One effect is that the page size of
4224  // shared memory cannot be changed dynamically, effectively preventing
4225  // large pages from working.
4226  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4227  // recommendation is (in OSS notes) to switch it off.
4228  p = ::getenv("EXTSHM");
4229  if (Verbose) {
4230    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4231  }
4232  if (p && strcasecmp(p, "ON") == 0) {
4233    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4234    _extshm = 1;
4235  } else {
4236    _extshm = 0;
4237  }
4238
4239  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4240  // Not tested, not supported.
4241  //
4242  // Note that it might be worth the trouble to test and to require it, if only to
4243  // get useful return codes for mprotect.
4244  //
4245  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4246  // exec() ? before loading the libjvm ? ....)
4247  p = ::getenv("XPG_SUS_ENV");
4248  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4249  if (p && strcmp(p, "ON") == 0) {
4250    _xpg_sus_mode = 1;
4251    trc("Unsupported setting: XPG_SUS_ENV=ON");
4252    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4253    // clobber address ranges. If we ever want to support that, we have to do some
4254    // testing first.
4255    guarantee(false, "XPG_SUS_ENV=ON not supported");
4256  } else {
4257    _xpg_sus_mode = 0;
4258  }
4259
4260  // Switch off AIX internal (pthread) guard pages. This has
4261  // immediate effect for any pthread_create calls which follow.
4262  p = ::getenv("AIXTHREAD_GUARDPAGES");
4263  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4264  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4265  guarantee(rc == 0, "");
4266
4267} // end: os::Aix::scan_environment()
4268
4269// PASE: initialize the libo4 library (AS400 PASE porting library).
4270void os::Aix::initialize_libo4() {
4271  Unimplemented();
4272}
4273
4274// AIX: initialize the libperfstat library (we load this dynamically
4275// because it is only available on AIX).
4276void os::Aix::initialize_libperfstat() {
4277
4278  assert(os::Aix::on_aix(), "AIX only");
4279
4280  if (!libperfstat::init()) {
4281    trc("libperfstat initialization failed.");
4282    assert(false, "libperfstat initialization failed");
4283  } else {
4284    if (Verbose) {
4285      fprintf(stderr, "libperfstat initialized.\n");
4286    }
4287  }
4288} // end: os::Aix::initialize_libperfstat
4289
4290/////////////////////////////////////////////////////////////////////////////
4291// thread stack
4292
4293// Function to query the current stack size using pthread_getthrds_np.
4294static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4295  // This only works when invoked on a pthread. As we agreed not to use
4296  // primordial threads anyway, I assert here.
4297  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4298
4299  // Information about this api can be found (a) in the pthread.h header and
4300  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4301  //
4302  // The use of this API to find out the current stack is kind of undefined.
4303  // But after a lot of tries and asking IBM about it, I concluded that it is safe
4304  // enough for cases where I let the pthread library create its stacks. For cases
4305  // where I create my own stack and pass it to pthread_create, it seems not to
4306  // work (the returned stack size in that case is 0).
4307
4308  pthread_t tid = pthread_self();
4309  struct __pthrdsinfo pinfo;
4310  char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
4311  int dummy_size = sizeof(dummy);
4312
4313  memset(&pinfo, 0, sizeof(pinfo));
4314
4315  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4316                                     sizeof(pinfo), dummy, &dummy_size);
4317
4318  if (rc != 0) {
4319    assert0(false);
4320    trcVerbose("pthread_getthrds_np failed (%d)", rc);
4321    return false;
4322  }
4323  guarantee0(pinfo.__pi_stackend);
4324
4325  // The following can happen when invoking pthread_getthrds_np on a pthread running
4326  // on a user provided stack (when handing down a stack to pthread create, see
4327  // pthread_attr_setstackaddr).
4328  // Not sure what to do here - I feel inclined to forbid this use case completely.
4329  guarantee0(pinfo.__pi_stacksize);
4330
4331  // Note: we get three values from pthread_getthrds_np:
4332  //       __pi_stackaddr, __pi_stacksize, __pi_stackend
4333  //
4334  // high addr    ---------------------
4335  //
4336  //    |         pthread internal data, like ~2K
4337  //    |
4338  //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
4339  //    |
4340  //    |
4341  //    |
4342  //    |
4343  //    |
4344  //    |
4345  //    |          ---------------------   (__pi_stackend - __pi_stacksize)
4346  //    |
4347  //    |          padding to align the following AIX guard pages, if enabled.
4348  //    |
4349  //    V          ---------------------   __pi_stackaddr
4350  //
4351  // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
4352  //
4353
4354  address stack_base = (address)(pinfo.__pi_stackend);
4355  address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,  os::vm_page_size());
4356  size_t stack_size = stack_base - stack_low_addr;
4357
4358  if (p_stack_base) {
4359    *p_stack_base = stack_base;
4360  }
4361
4362  if (p_stack_size) {
4363    *p_stack_size = stack_size;
4364  }
4365
4366  return true;
4367}
4368
4369// Get the current stack base from the OS (actually, the pthread library).
4370address os::current_stack_base() {
4371  address p;
4372  query_stack_dimensions(&p, 0);
4373  return p;
4374}
4375
4376// Get the current stack size from the OS (actually, the pthread library).
4377size_t os::current_stack_size() {
4378  size_t s;
4379  query_stack_dimensions(0, &s);
4380  return s;
4381}
4382
4383// Refer to the comments in os_solaris.cpp park-unpark.
4384//
4385// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4386// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4387// For specifics regarding the bug see GLIBC BUGID 261237 :
4388//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4389// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4390// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4391// is used. (The simple C test-case provided in the GLIBC bug report manifests the
4392// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4393// and monitorenter when we're using 1-0 locking. All those operations may result in
4394// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4395// of libpthread avoids the problem, but isn't practical.
4396//
4397// Possible remedies:
4398//
4399// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4400//      This is palliative and probabilistic, however. If the thread is preempted
4401//      between the call to compute_abstime() and pthread_cond_timedwait(), more
4402//      than the minimum period may have passed, and the abstime may be stale (in the
4403//      past) resulting in a hang. Using this technique reduces the odds of a hang
4404//      but the JVM is still vulnerable, particularly on heavily loaded systems.
4405//
4406// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4407//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4408//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4409//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4410//      thread.
4411//
4412// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4413//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4414//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4415//      This also works well. In fact it avoids kernel-level scalability impediments
4416//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4417//      timers in a graceful fashion.
4418//
4419// 4.   When the abstime value is in the past it appears that control returns
4420//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4421//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4422//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4423//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4424//      It may be possible to avoid reinitialization by checking the return
4425//      value from pthread_cond_timedwait(). In addition to reinitializing the
4426//      condvar we must establish the invariant that cond_signal() is only called
4427//      within critical sections protected by the adjunct mutex. This prevents
4428//      cond_signal() from "seeing" a condvar that's in the midst of being
4429//      reinitialized or that is corrupt. Sadly, this invariant obviates the
4430//      desirable signal-after-unlock optimization that avoids futile context switching.
4431//
4432//      I'm also concerned that some versions of NPTL might allocate an auxiliary
4433//      structure when a condvar is used or initialized. cond_destroy() would
4434//      release the helper structure. Our reinitialize-after-timedwait fix
4435//      put excessive stress on malloc/free and locks protecting the c-heap.
4436//
4437// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
4438// It may be possible to refine (4) by checking the kernel and NPTL versions
4439// and only enabling the work-around for vulnerable environments.
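// A minimal sketch of remedy (4) above (illustrative only; see the
// WorkAroundNPTLTimedWaitHang flag referenced above for the real guard):
#if 0
  // Inside a park()-style wait loop, with _mutex held:
  int status = pthread_cond_timedwait(_cond, _mutex, &abst);
  if (status != 0 && WorkAroundNPTLTimedWaitHang) {
    // The condvar may be left corrupt after a timedwait with a stale
    // abstime; re-create it while still holding the adjunct mutex.
    pthread_cond_destroy(_cond);
    pthread_cond_init(_cond, NULL);
  }
#endif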
4440
4441// utility to compute the abstime argument to timedwait:
4442// millis is the relative timeout time
4443// abstime will be the absolute timeout time
4444// TODO: replace compute_abstime() with unpackTime()
4445
4446static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4447  if (millis < 0) millis = 0;
4448  struct timeval now;
4449  int status = gettimeofday(&now, NULL);
4450  assert(status == 0, "gettimeofday");
4451  jlong seconds = millis / 1000;
4452  millis %= 1000;
4453  if (seconds > 50000000) { // see man cond_timedwait(3T)
4454    seconds = 50000000;
4455  }
4456  abstime->tv_sec = now.tv_sec  + seconds;
4457  long       usec = now.tv_usec + millis * 1000;
4458  if (usec >= 1000000) {
4459    abstime->tv_sec += 1;
4460    usec -= 1000000;
4461  }
4462  abstime->tv_nsec = usec * 1000;
4463  return abstime;
4464}
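// Usage sketch for compute_abstime (illustrative, not part of the build):
// a bounded wait of 500 ms on a condvar, with the usual spurious-wakeup
// loop. The mutex, condvar and predicate names are hypothetical.
#if 0
static void timed_wait_example() {
  static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
  static bool predicate = false;

  struct timespec abst;
  compute_abstime(&abst, 500 /* millis */);

  pthread_mutex_lock(&mu);
  while (!predicate) {
    int status = pthread_cond_timedwait(&cv, &mu, &abst);
    if (status == ETIMEDOUT) break; // the deadline has passed
    // status == 0: signalled or a spurious wakeup - re-test the predicate.
  }
  pthread_mutex_unlock(&mu);
}
#endif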
4465
4466// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4467// Conceptually TryPark() should be equivalent to park(0).
4468
4469int os::PlatformEvent::TryPark() {
4470  for (;;) {
4471    const int v = _Event;
4472    guarantee ((v == 0) || (v == 1), "invariant");
4473    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4474  }
4475}
4476
4477void os::PlatformEvent::park() {       // AKA "down()"
4478  // Invariant: Only the thread associated with the Event/PlatformEvent
4479  // may call park().
4480  // TODO: assert that _Assoc != NULL or _Assoc == Self
4481  int v;
4482  for (;;) {
4483    v = _Event;
4484    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4485  }
4486  guarantee (v >= 0, "invariant");
4487  if (v == 0) {
4488    // Do this the hard way by blocking ...
4489    int status = pthread_mutex_lock(_mutex);
4490    assert_status(status == 0, status, "mutex_lock");
4491    guarantee (_nParked == 0, "invariant");
4492    ++ _nParked;
4493    while (_Event < 0) {
4494      status = pthread_cond_wait(_cond, _mutex);
4495      assert_status(status == 0, status, "cond_wait");
4496    }
4497    -- _nParked;
4498
4499    // In theory we could move the ST of 0 into _Event past the unlock(),
4500    // but then we'd need a MEMBAR after the ST.
4501    _Event = 0;
4502    status = pthread_mutex_unlock(_mutex);
4503    assert_status(status == 0, status, "mutex_unlock");
4504  }
4505  guarantee (_Event >= 0, "invariant");
4506}
4507
4508int os::PlatformEvent::park(jlong millis) {
4509  guarantee (_nParked == 0, "invariant");
4510
4511  int v;
4512  for (;;) {
4513    v = _Event;
4514    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4515  }
4516  guarantee (v >= 0, "invariant");
4517  if (v != 0) return OS_OK;
4518
4519  // We do this the hard way, by blocking the thread.
4520  // Consider enforcing a minimum timeout value.
4521  struct timespec abst;
4522  compute_abstime(&abst, millis);
4523
4524  int ret = OS_TIMEOUT;
4525  int status = pthread_mutex_lock(_mutex);
4526  assert_status(status == 0, status, "mutex_lock");
4527  guarantee (_nParked == 0, "invariant");
4528  ++_nParked;
4529
4530  // Object.wait(timo) will return because of
4531  // (a) notification
4532  // (b) timeout
4533  // (c) thread.interrupt
4534  //
4535  // Thread.interrupt and object.notify{All} both call Event::set.
4536  // That is, we treat thread.interrupt as a special case of notification.
4537  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4538  // We assume all ETIME returns are valid.
4539  //
4540  // TODO: properly differentiate simultaneous notify+interrupt.
4541  // In that case, we should propagate the notify to another waiter.
4542
4543  while (_Event < 0) {
4544    status = pthread_cond_timedwait(_cond, _mutex, &abst);
4545    assert_status(status == 0 || status == ETIMEDOUT,
4546                  status, "cond_timedwait");
4547    if (!FilterSpuriousWakeups) break;         // previous semantics
4548    if (status == ETIMEDOUT) break;
4549    // We consume and ignore EINTR and spurious wakeups.
4550  }
4551  --_nParked;
4552  if (_Event >= 0) {
4553     ret = OS_OK;
4554  }
4555  _Event = 0;
4556  status = pthread_mutex_unlock(_mutex);
4557  assert_status(status == 0, status, "mutex_unlock");
4558  assert (_nParked == 0, "invariant");
4559  return ret;
4560}
4561
4562void os::PlatformEvent::unpark() {
4563  int v, AnyWaiters;
4564  for (;;) {
4565    v = _Event;
4566    if (v > 0) {
4567      // The LD of _Event could have reordered or be satisfied
4568      // by a read-aside from this processor's write buffer.
4569      // To avoid problems execute a barrier and then
4570      // ratify the value.
4571      OrderAccess::fence();
4572      if (_Event == v) return;
4573      continue;
4574    }
4575    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4576  }
4577  if (v < 0) {
4578    // Wait for the thread associated with the event to vacate
4579    int status = pthread_mutex_lock(_mutex);
4580    assert_status(status == 0, status, "mutex_lock");
4581    AnyWaiters = _nParked;
4582
4583    if (AnyWaiters != 0) {
4584      // Note: we signal while still holding the lock here; the mutex
4585      // should be locked for pthread_cond_signal(_cond) (see below).
4586      status = pthread_cond_signal(_cond);
4587      assert_status(status == 0, status, "cond_signal");
4588    }
4589    // Mutex should be locked for pthread_cond_signal(_cond).
4590    status = pthread_mutex_unlock(_mutex);
4591    assert_status(status == 0, status, "mutex_unlock");
4592  }
4593
4594  // Signalling _after_ dropping the lock (as for "immortal" Events) would also
4595  // be safe and avoids a common class of futile wakeups. In rare
4596  // circumstances this can cause a thread to return prematurely from
4597  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
4598  // simply re-test the condition and re-park itself.
4599}


// JSR166
// -------------------------------------------------------

//
// The Solaris and Linux implementations of park/unpark are fairly
// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
// Park decrements the count if it is > 0, else does a condvar wait. Unpark
// sets the count to 1 and signals the condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications.
//

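// Illustrative sketch (not in the original source): the Parker permit
// protocol described above, with hypothetical threads A and B.
//
//   B: park()    -> _counter == 0, so B takes _mutex and blocks in
//                   pthread_cond_wait()/pthread_cond_timedwait()
//   A: unpark(B) -> takes _mutex, sets _counter = 1, signals _cond
//   B: wakes up  -> clears _counter back to 0 and returns
//
// If A's unpark(B) runs first, B's next park() sees _counter > 0, clears it,
// and returns immediately without ever touching the condvar. Repeated
// unpark() calls are idempotent: _counter is set to 1, not incremented, so
// at most one permit is ever stored.
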
#define MAX_SECS 100000000
//
// This code is common to Linux and Solaris and will be moved to a
// common place in dolphin.
//
// The passed-in time value is either a relative time in nanoseconds
// or an absolute time in milliseconds. Either way it has to be unpacked
// into suitable seconds and nanoseconds components and stored in the
// given timespec structure.
// Because the given time is a 64-bit value and the time_t used in the
// timespec is only a signed 32-bit value (except on 64-bit Linux), we have
// to watch for overflow if times way in the future are given. Further, on
// Solaris versions prior to 10 there is a restriction (see cond_timedwait)
// that the specified number of seconds, in abstime, is less than
// current_time + 100,000,000. As it will be 28 years before
// "now + 100000000" overflows a 32-bit time_t, we can ignore that overflow
// and just impose a hard limit on the seconds using the value of
// "now + 100,000,000". This places a limit on the timeout of about 3.17
// years from "now".
//

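// Worked example (illustrative, with made-up numbers): a relative wait of
// 2.5 seconds, i.e. time == 2,500,000,000 ns, with now.tv_sec == 1000 and
// now.tv_usec == 900,000:
//
//   secs             = 2500000000 / NANOSECS_PER_SEC        = 2
//   absTime->tv_sec  = 1000 + 2                             = 1002
//   absTime->tv_nsec = (2500000000 % NANOSECS_PER_SEC)
//                      + 900000 * 1000                      = 1,400,000,000
//   tv_nsec >= NANOSECS_PER_SEC, so normalize:
//   absTime->tv_nsec -= NANOSECS_PER_SEC                    = 400,000,000
//   ++absTime->tv_sec                                       = 1003
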
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    } else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  } else {
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    } else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0;
    OrderAccess::fence();
    return;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's
  // an interrupt pending. Check for an interrupt before trying to wait.
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode the time arguments.
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region.
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending, both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if we cannot get the lock, since interference arises from
  // unblocking. Also, check for a pending interrupt before trying to wait.
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex);
  } else {
    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy (_cond);
      pthread_cond_init    (_cond, NULL);
    }
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // If externally suspended while waiting, re-suspend.
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}

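// Illustrative note (not from the source): this code is typically reached
// from Java via java.util.concurrent.locks.LockSupport. For example,
// LockSupport.parkNanos(blocker, 1000000L) calls Unsafe.park(false, 1000000L),
// which invokes Parker::park(isAbsolute=false, time=1000000) on the current
// thread's Parker -- a relative wait of one millisecond. LockSupport.parkUntil()
// instead passes an absolute deadline in milliseconds, i.e. isAbsolute=true.
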
void Parker::unpark() {
  int s, status;
  status = pthread_mutex_lock(_mutex);
  assert (status == 0, "invariant");
  s = _counter;
  _counter = 1;
  if (s < 1) {
    if (WorkAroundNPTLTimedWaitHang) {
      status = pthread_cond_signal (_cond);
      assert (status == 0, "invariant");
      status = pthread_mutex_unlock(_mutex);
      assert (status == 0, "invariant");
    } else {
      status = pthread_mutex_unlock(_mutex);
      assert (status == 0, "invariant");
      status = pthread_cond_signal (_cond);
      assert (status == 0, "invariant");
    }
  } else {
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
  }
}

extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from a signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  char* argv[4] = { (char*) "sh", (char*) "-c", cmd, NULL };

  pid_t pid = fork();

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // Copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
        case ECHILD: return 0;
        case EINTR: break;
        default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
  return -1;
}

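// Illustrative usage sketch (hypothetical caller, not part of this file):
//
//   int rc = os::fork_and_exec((char*) "ls /tmp > /dev/null");
//   if (rc < 0) {
//     // could not fork, or waitpid failed
//   } else if (rc >= 0x80) {
//     // by the convention above, the child most likely died from
//     // signal (rc - 0x80) -- although a child that itself exits
//     // with a code >= 0x80 is indistinguishable from this case
//   } else {
//     // normal exit; rc is the child's exit code
//   }
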
// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report whether we are running in a headless jre.
//
// Since JDK 8, xawt/libmawt.so has been moved into the same directory
// as libawt.so and renamed libawt_xawt.so.
bool os::is_headless_jre() {
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];
  const char *xawtstr = "/xawt/libmawt.so";
  const char *new_xawtstr = "/libawt_xawt.so";

  char *p;

  // Get the path to libjvm.so.
  os::jvm_path(buf, sizeof(buf));

  // Get rid of libjvm.so.
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Get rid of client or server.
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Check xawt/libmawt.so.
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // Check libawt_xawt.so.
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, new_xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  return true;
}

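// Illustrative walk-through (hypothetical path, not from the source): if
// os::jvm_path() yields "/opt/java/jre/lib/ppc64/server/libjvm.so", the two
// strrchr() cuts leave "/opt/java/jre/lib/ppc64", and the function then
// probes "/opt/java/jre/lib/ppc64/xawt/libmawt.so" and
// "/opt/java/jre/lib/ppc64/libawt_xawt.so"; the JRE is reported headless
// only if neither library exists.
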
// Get the default path to the core file.
// Returns the length of the string written to the buffer.
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
               p, current_process_id());

  return strlen(buffer);
}

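// For example, with a current directory of /tmp and a process id of 4711
// (made-up values), the buffer ends up as "/tmp/core or core.4711".
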
#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
  // No tests available for this platform.
}
#endif

bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen - len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
               "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
               "Otherwise, press RETURN to abort...",
               os::current_process_id(),
               os::current_thread_id(), thread_self());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // Yes; the user asked the VM to launch the debugger.
    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());

    os::fork_and_exec(buf);
    yes = false;
  }
  return yes;
}
