// os_aix.cpp revision 9639:f0dcbc6e99b1
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS doc, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "misc_aix.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif
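// Note: os::elapsedVTime() below calls getrusage(RUSAGE_THREAD, ...) and falls
// back to os::elapsedTime() when the call fails on systems without support.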

// PPC port
static const uintx Use64KPagesThreshold       = 1*M;
static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;

// Add missing declarations (these should be in procinfo.h, but are missing before AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The convention in this file is that codeptr_t is a *real code pointer*:
// any function taking a codeptr_t argument assumes a real code pointer and
// will not handle function descriptors (e.g. getFuncName), whereas functions
// taking an address argument will deal with function descriptors
// (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;

// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
#define PV_7 0x200000          /* Power PC 7 */
#define PV_7_Compat 0x208000   /* Power PC 7 */
#endif
#ifndef PV_8
#define PV_8 0x300000          /* Power PC 8 */
#define PV_8_Compat 0x308000   /* Power PC 8 */
#endif

// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// Function to check a given stack pointer against given stack limits.
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// Returns true if function is a valid codepointer.
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (!LoadedLibraries::find_for_text_address(p, NULL)) {
    return false;
  }
  return true;
}

// Macro to check a given stack pointer against given stack limits and to die if the test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// Macro to check the current stack pointer against given stack limits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
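// Example usage (see java_start() and create_attached_thread() below):
//   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());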

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in the AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" and 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found at the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
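// Example (LDR_CNTRL suboptions are separated by '@' on AIX; the values shown
// are only an illustration): start a process with 64K data and stack pages via
//   LDR_CNTRL=DATAPSIZE=64K@STACKPSIZE=64K <command>
//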
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
static bool is_close_to_brk(address a) {
  address a1 = (address) sbrk(0);
  if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
    return true;
  }
  return false;
}
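// Usage sketch (assumed, per the comment above): callers that shmat()/mmap()
// at a wish address should first reject addresses close to the BRK, e.g.:
//   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
//     return NULL; // refuse the wish address
//   }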

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

// Return true if the process runs with special privileges, i.e. the real and
// effective user or group ids differ (set-uid/set-gid executable).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x40000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  // Note: unsigned loop index to match numFullDisclaimsNeeded and avoid a
  // signed/unsigned comparison.
  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
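// Example (hypothetical 'base'): disclaiming 2.5 GB issues two full 1 GB
// disclaims plus one 512 MB remainder:
//   my_disclaim64(base, 5 * 512 * M);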

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}
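// Example (mirrors the probing done in query_multipage_support() below):
// determine the page size backing the C heap:
//   void* p = ::malloc(SIZE_16M);
//   const size_t data_page_size = os::Aix::query_pagesize(p);
//   ::free(p);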

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // Get the number of online (logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    case -1:       return "not set";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is defined either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  {
    void* p = ::malloc(SIZE_16M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because primordial thread's stack may have different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  /* PPC port: so far unused.
  {
    address any_function =
      (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }
  */

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    Unimplemented();
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
      trc("disabling multipage support.\n");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i++) {
      const size_t pagesize = sizes[i];
      if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set pagesize.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
        // PPC port  MiscUtils::describe_errno(en));
      } else {
        // Attach and double check page size.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == SIZE_64K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == SIZE_16M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s\n",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s\n",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s\n",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_64K_pages ? "yes" : "no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_16M_pages ? "yes" : "no"));
  trcVerbose("Multipage error details: %d\n",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == SIZE_4K);
  assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
  // PPC port: so far unused. assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);

} // end query_multipage_support()

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}
745
746// retrieve memory information.
747// Returns false if something went wrong;
748// content of pmi undefined in this case.
749bool os::Aix::get_meminfo(meminfo_t* pmi) {
750
751  assert(pmi, "get_meminfo: invalid parameter");
752
753  memset(pmi, 0, sizeof(meminfo_t));
754
755  if (os::Aix::on_pase()) {
756
757    Unimplemented();
758    return false;
759
760  } else {
761
762    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
763    // See:
764    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
765    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
766    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
767    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
768
769    perfstat_memory_total_t psmt;
770    memset (&psmt, '\0', sizeof(psmt));
771    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
772    if (rc == -1) {
773      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
774      assert(0, "perfstat_memory_total() failed");
775      return false;
776    }
777
778    assert(rc == 1, "perfstat_memory_total() - weird return code");
779
780    // excerpt from
781    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
782    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
783    // The fields of perfstat_memory_total_t:
784    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
785    // u_longlong_t real_total         Total real memory (in 4 KB pages).
786    // u_longlong_t real_free          Free real memory (in 4 KB pages).
787    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
788    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
789
790    pmi->virt_total = psmt.virt_total * 4096;
791    pmi->real_total = psmt.real_total * 4096;
792    pmi->real_free = psmt.real_free * 4096;
793    pmi->pgsp_total = psmt.pgsp_total * 4096;
794    pmi->pgsp_free = psmt.pgsp_free * 4096;
795
796    return true;
797
798  }
799} // end os::Aix::get_meminfo
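// Example usage (mirrors os::Aix::available_memory() above); all meminfo_t
// values are byte counts, converted from perfstat's 4K page counts:
//   os::Aix::meminfo_t mi;
//   if (os::Aix::get_meminfo(&mi)) {
//     const julong free_bytes = mi.real_free;
//   }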

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset(&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy(pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_8:
    strcpy(pci->version, "Power PC 8");
    break;
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  case PV_8_Compat:
    strcpy(pci->version, "PV_8_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // Find out my own stack dimensions.
  {
    // Actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is the kernel thread id (similar to a Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (stack_size == 0) {
    stack_size = os::Aix::default_stack_size(thr_type);

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
      stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
  pthread_attr_setstacksize(&attr, stack_size);

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret == 0) {
    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
  } else {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}
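// Stack size selection above, summarized: a Java thread uses
// JavaThread::stack_size_at_create() (settable with -Xss), a compiler thread
// uses CompilerThreadStackSize * K if that flag is set, and VM/GC/watcher
// threads use VMThreadStackSize * K; the result is never smaller than
// os::Aix::min_stack_allowed.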

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "pthread_continue_np failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}
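// Typical round trip through the TLS wrappers above (illustrative only;
// 'my_ptr' is a hypothetical value):
//   int idx = os::allocate_thread_local_storage();  // pthread_key_create
//   os::thread_local_storage_at_put(idx, my_ptr);   // pthread_setspecific
//   os::free_thread_local_storage(idx);             // pthread_key_delete
// Retrieval goes through pthread_getspecific on the same key.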

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}


// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (They probably only ever tested in C, not C++.)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // If you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in the osThread_linux.hpp file.

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
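// Example results (hypothetical library name "jsig"):
//   dll_build_name(buf, len, "", "jsig")         -> "libjsig.so"
//   dll_build_name(buf, len, "/usr/lib", "jsig") -> "/usr/lib/libjsig.so"
// For a ':'-separated path list, the first candidate that exists on disk wins.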

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.
  loaded_module_t lm;
  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else {
    return false;
  }

}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
    // It is a real code pointer.
    return p;
  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
    // Pointer to data segment, potential function descriptor.
    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
      // It is a function descriptor.
      return code_entry;
    }
  }

  return NULL;
}
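// Background: on 64-bit AIX a C function pointer does not point at code but at
// a function descriptor in the data segment, laid out (simplified) as
//   struct { void* code_entry; void* toc; void* environment; };
// FunctionDescriptor::entry() above reads the first slot, i.e. the real code
// address. (Layout shown for illustration only.)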

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset, demangle);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  if (p_name && namelen > 0) {
    loaded_module_t lm;
    if (LoadedLibraries::find_for_text_address(pc, &lm) != NULL) {
      strncpy(p_name, lm.shortname, namelen);
      p_name[namelen - 1] = '\0';
    }
    return 0;
  }

  return -1;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    if (ebuf != NULL && ebuflen > 0) {
      ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    }
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The dl is loaded immediately, with all its dependents.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

void* os::dll_lookup(void* handle, const char* name) {
  void* res = dlsym(handle, name);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::get_summary_os_info(char* buf, size_t buflen) {
  // There might be something more readable than uname results for AIX.
  struct utsname name;
  uname(&name);
  snprintf(buf, buflen, "%s %s", name.release, name.version);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print(name.sysname); st->print(" ");
  st->print(name.nodename); st->print(" ");
  st->print(name.release); st->print(" ");
  st->print(name.version); st->print(" ");
  st->print(name.machine);
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}
1584
1585void os::print_memory_info(outputStream* st) {
1586
1587  st->print_cr("Memory:");
1588
1589  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1590  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1591  st->print_cr("  default shared memory page size:        %s",
1592    describe_pagesize(g_multipage_support.shmpsize));
1593  st->print_cr("  Can use 64K pages dynamically with shared memory: %s",
1594    (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1595  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1596    (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1597  if (g_multipage_error != 0) {
1598    st->print_cr("  multipage error: %d", g_multipage_error);
1599  }
1600
1601  // print out LDR_CNTRL because it affects the default page sizes
1602  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1603  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1604
1605  const char* const extshm = ::getenv("EXTSHM");
1606  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1607  if (extshm && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1608    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1609  }
1610
1611  // Call os::Aix::get_meminfo() to retrieve memory statistics.
1612  os::Aix::meminfo_t mi;
1613  if (os::Aix::get_meminfo(&mi)) {
1614    char buffer[256];
1615    if (os::Aix::on_aix()) {
1616      jio_snprintf(buffer, sizeof(buffer),
1617                   "  physical total : %llu\n"
1618                   "  physical free  : %llu\n"
1619                   "  swap total     : %llu\n"
1620                   "  swap free      : %llu\n",
1621                   mi.real_total,
1622                   mi.real_free,
1623                   mi.pgsp_total,
1624                   mi.pgsp_free);
1625    } else {
1626      Unimplemented();
1627    }
1628    st->print_raw(buffer);
1629  } else {
1630    st->print_cr("  (no more information available)");
1631  }
1632}
1633
1634// Get a string for the cpuinfo that is a summary of the cpu type
1635void os::get_summary_cpu_info(char* buf, size_t buflen) {
1636    // Use the processor version string reported by the OS, if available.
1637    os::Aix::cpuinfo_t ci;
1638    if (os::Aix::get_cpuinfo(&ci)) {
1639      jio_snprintf(buf, buflen, "%s", ci.version); // unlike strncpy, always NUL-terminates
1640    } else {
1641      jio_snprintf(buf, buflen, "%s", "AIX");
1642    }
1643}
1644
1645void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1646}
1647
1648void os::print_siginfo(outputStream* st, void* siginfo) {
1649  // Use common posix version.
1650  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1651  st->cr();
1652}
1653
1654static void print_signal_handler(outputStream* st, int sig,
1655                                 char* buf, size_t buflen);
1656
1657void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1658  st->print_cr("Signal Handlers:");
1659  print_signal_handler(st, SIGSEGV, buf, buflen);
1660  print_signal_handler(st, SIGBUS , buf, buflen);
1661  print_signal_handler(st, SIGFPE , buf, buflen);
1662  print_signal_handler(st, SIGPIPE, buf, buflen);
1663  print_signal_handler(st, SIGXFSZ, buf, buflen);
1664  print_signal_handler(st, SIGILL , buf, buflen);
1665  print_signal_handler(st, SR_signum, buf, buflen);
1666  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1667  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1668  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1669  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1670  print_signal_handler(st, SIGTRAP, buf, buflen);
1671  print_signal_handler(st, SIGDANGER, buf, buflen);
1672}
1673
1674static char saved_jvm_path[MAXPATHLEN] = {0};
1675
1676// Find the full path to the current module, libjvm.so.
1677void os::jvm_path(char *buf, jint buflen) {
1678  // Error checking.
1679  if (buflen < MAXPATHLEN) {
1680    assert(false, "must use a large-enough buffer");
1681    buf[0] = '\0';
1682    return;
1683  }
1684  // Lazy resolve the path to current module.
1685  if (saved_jvm_path[0] != 0) {
1686    strcpy(buf, saved_jvm_path);
1687    return;
1688  }
1689
1690  Dl_info dlinfo;
1691  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1692  assert(ret != 0, "cannot locate libjvm");
1693  char* rp = realpath((char *)dlinfo.dli_fname, buf);
1694  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1695
1696  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1697  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1698}
1699
1700void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1701  // no prefix required, not even "_"
1702}
1703
1704void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1705  // no suffix required
1706}
1707
1708////////////////////////////////////////////////////////////////////////////////
1709// sun.misc.Signal support
1710
1711static volatile jint sigint_count = 0;
1712
1713static void
1714UserHandler(int sig, void *siginfo, void *context) {
1715  // 4511530 - sem_post is serialized and handled by the manager thread. When
1716  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1717  // don't want to flood the manager thread with sem_post requests.
1718  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1719    return;
1720
1721  // Ctrl-C is pressed during error reporting, likely because the error
1722  // handler fails to abort. Let VM die immediately.
1723  if (sig == SIGINT && is_error_reported()) {
1724    os::die();
1725  }
1726
1727  os::signal_notify(sig);
1728}
1729
1730void* os::user_handler() {
1731  return CAST_FROM_FN_PTR(void*, UserHandler);
1732}
1733
1734extern "C" {
1735  typedef void (*sa_handler_t)(int);
1736  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1737}
1738
1739void* os::signal(int signal_number, void* handler) {
1740  struct sigaction sigAct, oldSigAct;
1741
1742  sigfillset(&(sigAct.sa_mask));
1743
1744  // Do not block out synchronous signals in the signal handler.
1745  // Blocking synchronous signals only makes sense if you can really
1746  // be sure that those signals won't happen during signal handling,
1747  // when the blocking applies. Normal signal handlers are lean and
1748  // do not cause signals. But our signal handlers tend to be "risky"
1749  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
1750  // On AIX, PASE there was a case where a SIGSEGV happened, followed
1751  // by a SIGILL, which was blocked due to the signal mask. The process
1752  // just hung forever. Better to crash from a secondary signal than to hang.
1753  sigdelset(&(sigAct.sa_mask), SIGSEGV);
1754  sigdelset(&(sigAct.sa_mask), SIGBUS);
1755  sigdelset(&(sigAct.sa_mask), SIGILL);
1756  sigdelset(&(sigAct.sa_mask), SIGFPE);
1757  sigdelset(&(sigAct.sa_mask), SIGTRAP);
1758
1759  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1760
1761  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1762
1763  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1764    // -1 means registration failed
1765    return (void *)-1;
1766  }
1767
1768  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1769}
1770
1771void os::signal_raise(int signal_number) {
1772  ::raise(signal_number);
1773}
1774
1775//
1776  // The following code was moved from os.cpp to make it platform
1777  // specific, which it is by its very nature.
1778//
1779
1780// Will be modified when max signal is changed to be dynamic
1781int os::sigexitnum_pd() {
1782  return NSIG;
1783}
1784
1785// a counter for each possible signal value
1786static volatile jint pending_signals[NSIG+1] = { 0 };
1787
1788  // AIX/POSIX specific hand-shaking semaphore.
1789static sem_t sig_sem;
1790
1791void os::signal_init_pd() {
1792  // Initialize signal structures
1793  ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1794
1795  // Initialize signal semaphore
1796  int rc = ::sem_init(&sig_sem, 0, 0);
1797  guarantee(rc != -1, "sem_init failed");
1798}
1799
1800void os::signal_notify(int sig) {
1801  Atomic::inc(&pending_signals[sig]);
1802  ::sem_post(&sig_sem);
1803}
1804
1805static int check_pending_signals(bool wait) {
1806  Atomic::store(0, &sigint_count);
1807  for (;;) {
1808    for (int i = 0; i < NSIG + 1; i++) {
1809      jint n = pending_signals[i];
1810      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1811        return i;
1812      }
1813    }
1814    if (!wait) {
1815      return -1;
1816    }
1817    JavaThread *thread = JavaThread::current();
1818    ThreadBlockInVM tbivm(thread);
1819
1820    bool threadIsSuspended;
1821    do {
1822      thread->set_suspend_equivalent();
1823      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1824
1825      ::sem_wait(&sig_sem);
1826
1827      // were we externally suspended while we were waiting?
1828      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1829      if (threadIsSuspended) {
1830        //
1831        // The semaphore has been incremented, but while we were waiting
1832        // another thread suspended us. We don't want to continue running
1833        // while suspended because that would surprise the thread that
1834        // suspended us.
1835        //
1836        ::sem_post(&sig_sem);
1837
1838        thread->java_suspend_self();
1839      }
1840    } while (threadIsSuspended);
1841  }
1842}
1843
1844int os::signal_lookup() {
1845  return check_pending_signals(false);
1846}
1847
1848int os::signal_wait() {
1849  return check_pending_signals(true);
1850}
1851
1852////////////////////////////////////////////////////////////////////////////////
1853// Virtual Memory
1854
1855// We need to keep small simple bookkeeping for os::reserve_memory and friends.
1856
1857#define VMEM_MAPPED  1
1858#define VMEM_SHMATED 2
1859
1860struct vmembk_t {
1861  int type;         // 1 - mmap, 2 - shmat
1862  char* addr;
1863  size_t size;      // Real size, may be larger than usersize.
1864  size_t pagesize;  // page size of area
1865  vmembk_t* next;
1866
1867  bool contains_addr(char* p) const {
1868    return p >= addr && p < (addr + size);
1869  }
1870
1871  bool contains_range(char* p, size_t s) const {
1872    return contains_addr(p) && contains_addr(p + s - 1);
1873  }
1874
1875  void print_on(outputStream* os) const {
1876    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1877      " bytes, %d %s pages), %s",
1878      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1879      (type == VMEM_SHMATED ? "shmat" : "mmap")
1880    );
1881  }
1882
1883  // Check that range is a sub range of memory block (or equal to memory block);
1884  // also check that range is fully page aligned to the page size of the block.
1885  void assert_is_valid_subrange(char* p, size_t s) const {
1886    if (!contains_range(p, s)) {
1887      fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1888              "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1889              p, p + s - 1, addr, addr + size - 1);
1890      guarantee0(false);
1891    }
1892    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1893      fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1894              " aligned to pagesize (%s)\n", p, p + s);
1895      guarantee0(false);
1896    }
1897  }
1898};
1899
1900static struct {
1901  vmembk_t* first;
1902  MiscUtils::CritSect cs;
1903} vmem;
1904
1905static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1906  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1907  assert0(p);
1908  if (p) {
1909    MiscUtils::AutoCritSect lck(&vmem.cs);
1910    p->addr = addr; p->size = size;
1911    p->pagesize = pagesize;
1912    p->type = type;
1913    p->next = vmem.first;
1914    vmem.first = p;
1915  }
1916}
1917
1918static vmembk_t* vmembk_find(char* addr) {
1919  MiscUtils::AutoCritSect lck(&vmem.cs);
1920  for (vmembk_t* p = vmem.first; p; p = p->next) {
1921    if (p->addr <= addr && (p->addr + p->size) > addr) {
1922      return p;
1923    }
1924  }
1925  return NULL;
1926}
1927
1928static void vmembk_remove(vmembk_t* p0) {
1929  MiscUtils::AutoCritSect lck(&vmem.cs);
1930  assert0(p0);
1931  assert0(vmem.first); // List should not be empty.
1932  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1933    if (*pp == p0) {
1934      *pp = p0->next;
1935      ::free(p0);
1936      return;
1937    }
1938  }
1939  assert0(false); // Not found?
1940}
1941
1942static void vmembk_print_on(outputStream* os) {
1943  MiscUtils::AutoCritSect lck(&vmem.cs);
1944  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1945    vmi->print_on(os);
1946    os->cr();
1947  }
1948}
1949
1950// Reserve and attach a section of System V memory.
1951// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1952// address. Failing that, it will attach the memory anywhere.
1953// If <requested_addr> is NULL, function will attach the memory anywhere.
1954//
1955// <alignment_hint> is being ignored by this function. It is very probable however that the
1956// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1957  // Should this not be enough, we can put more work into it.
1958static char* reserve_shmated_memory (
1959  size_t bytes,
1960  char* requested_addr,
1961  size_t alignment_hint) {
1962
1963  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1964    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1965    bytes, requested_addr, alignment_hint);
1966
1967  // Either give me wish address or wish alignment but not both.
1968  assert0(!(requested_addr != NULL && alignment_hint != 0));
1969
1970  // We must prevent anyone from attaching too close to the
1971  // BRK because that may cause malloc OOM.
1972  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1973    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1974      "Will attach anywhere.", requested_addr);
1975    // Act like the OS refused to attach there.
1976    requested_addr = NULL;
1977  }
1978
1979  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1980  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1981  if (os::Aix::on_pase_V5R4_or_older()) {
1982    ShouldNotReachHere();
1983  }
1984
1985  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1986  const size_t size = align_size_up(bytes, SIZE_64K);
1987
1988  // Reserve the shared segment.
1989  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1990  if (shmid == -1) {
1991    trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1992    return NULL;
1993  }
1994
1995  // Important note:
1996  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1997  // We must right after attaching it remove it from the system. System V shm segments are global and
1998  // survive the process.
1999  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2000
2001  struct shmid_ds shmbuf;
2002  memset(&shmbuf, 0, sizeof(shmbuf));
2003  shmbuf.shm_pagesize = SIZE_64K;
2004  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2005    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2006               size / SIZE_64K, errno);
2007    // I want to know if this ever happens.
2008    assert(false, "failed to set page size for shmat");
2009  }
2010
2011  // Now attach the shared segment.
2012  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2013  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2014  // were not a segment boundary.
2015  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2016  const int errno_shmat = errno;
2017
2018  // (A) Right after shmat and before handing shmat errors delete the shm segment.
2019  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2020    trc("shmctl(%d, IPC_RMID) failed (%d)\n", shmid, errno);
2021    assert(false, "failed to remove shared memory segment!");
2022  }
2023
2024  // Handle shmat error. If we failed to attach, just return.
2025  if (addr == (char*)-1) {
2026    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2027    return NULL;
2028  }
2029
2030  // Just for info: query the real page size. In case setting the page size did not
2031  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2032  const size_t real_pagesize = os::Aix::query_pagesize(addr);
2033  if (real_pagesize != shmbuf.shm_pagesize) {
2034    trcVerbose("pagesize is, surprisingly, %s.", describe_pagesize(real_pagesize));
2035  }
2036
2037  if (addr) {
2038    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2039      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2040  } else {
2041    if (requested_addr != NULL) {
2042      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", size, requested_addr);
2043    } else {
2044      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2045    }
2046  }
2047
2048  // book-keeping
2049  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2050  assert0(is_aligned_to(addr, os::vm_page_size()));
2051
2052  return addr;
2053}
2054
2055static bool release_shmated_memory(char* addr, size_t size) {
2056
2057  trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2058    addr, addr + size - 1);
2059
2060  bool rc = false;
2061
2062  // TODO: is there a way to verify shm size without doing bookkeeping?
2063  if (::shmdt(addr) != 0) {
2064    trcVerbose("error (%d).", errno);
2065  } else {
2066    trcVerbose("ok.");
2067    rc = true;
2068  }
2069  return rc;
2070}
2071
2072static bool uncommit_shmated_memory(char* addr, size_t size) {
2073  trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2074    addr, addr + size - 1);
2075
2076  const bool rc = my_disclaim64(addr, size);
2077
2078  if (!rc) {
2079    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2080    return false;
2081  }
2082  return true;
2083}
2084
2085// Reserve memory via mmap.
2086// If <requested_addr> is given, an attempt is made to attach at the given address.
2087// Failing that, memory is allocated at any address.
2088// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2089// allocate at an address aligned with the given alignment. Failing that, memory
2090// is aligned anywhere.
2091static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2092  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2093    "alignment_hint " UINTX_FORMAT "...",
2094    bytes, requested_addr, alignment_hint);
2095
2096  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2097  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2098    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2099    return NULL;
2100  }
2101
2102  // We must prevent anyone from attaching too close to the
2103  // BRK because that may cause malloc OOM.
2104  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2105    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2106      "Will attach anywhere.", requested_addr);
2107    // Act like the OS refused to attach there.
2108    requested_addr = NULL;
2109  }
2110
2111  // Specify one or the other but not both.
2112  assert0(!(requested_addr != NULL && alignment_hint > 0));
2113
2114  // In 64K mode, we claim the global page size (os::vm_page_size())
2115  // is 64K. This is one of the few points where that illusion may
2116  // break, because mmap() will always return memory aligned to 4K. So
2117  // we must ensure we only ever return memory aligned to 64k.
2118  if (alignment_hint) {
2119    alignment_hint = lcm(alignment_hint, os::vm_page_size());
2120  } else {
2121    alignment_hint = os::vm_page_size();
2122  }
2123
2124  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2125  const size_t size = align_size_up(bytes, os::vm_page_size());
2126
2127  // alignment: Allocate memory large enough to include an aligned range of the right size and
2128  // cut off the leading and trailing waste pages.
2129  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2130  const size_t extra_size = size + alignment_hint;
2131
2132  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2133  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2134  int flags = MAP_ANONYMOUS | MAP_SHARED;
2135
2136  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2137  // it means if wishaddress is given but MAP_FIXED is not set.
2138  //
2139  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2140  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2141  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2142  // get clobbered.
2143  if (requested_addr != NULL) {
2144    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2145      flags |= MAP_FIXED;
2146    }
2147  }
2148
2149  char* addr = (char*)::mmap(requested_addr, extra_size,
2150      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2151
2152  if (addr == MAP_FAILED) {
2153    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2154    return NULL;
2155  }
2156
2157  // Handle alignment.
2158  char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2159  const size_t waste_pre = addr_aligned - addr;
2160  char* const addr_aligned_end = addr_aligned + size;
2161  const size_t waste_post = extra_size - waste_pre - size;
2162  if (waste_pre > 0) {
2163    ::munmap(addr, waste_pre);
2164  }
2165  if (waste_post > 0) {
2166    ::munmap(addr_aligned_end, waste_post);
2167  }
2168  addr = addr_aligned;
2169
2170  if (addr) {
2171    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2172      addr, addr + bytes, bytes);
2173  } else {
2174    if (requested_addr != NULL) {
2175      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2176    } else {
2177      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2178    }
2179  }
2180
2181  // bookkeeping
2182  vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2183
2184  // Test alignment, see above.
2185  assert0(is_aligned_to(addr, os::vm_page_size()));
2186
2187  return addr;
2188}
2189
2190static bool release_mmaped_memory(char* addr, size_t size) {
2191  assert0(is_aligned_to(addr, os::vm_page_size()));
2192  assert0(is_aligned_to(size, os::vm_page_size()));
2193
2194  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2195    addr, addr + size - 1);
2196  bool rc = false;
2197
2198  if (::munmap(addr, size) != 0) {
2199    trcVerbose("failed (%d)\n", errno);
2200    rc = false;
2201  } else {
2202    trcVerbose("ok.");
2203    rc = true;
2204  }
2205
2206  return rc;
2207}
2208
2209static bool uncommit_mmaped_memory(char* addr, size_t size) {
2210
2211  assert0(is_aligned_to(addr, os::vm_page_size()));
2212  assert0(is_aligned_to(size, os::vm_page_size()));
2213
2214  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2215    addr, addr + size - 1);
2216  bool rc = false;
2217
2218  // Uncommit mmap memory with msync MS_INVALIDATE.
2219  if (::msync(addr, size, MS_INVALIDATE) != 0) {
2220    trcVerbose("failed (%d)\n", errno);
2221    rc = false;
2222  } else {
2223    trcVerbose("ok.");
2224    rc = true;
2225  }
2226
2227  return rc;
2228}
2229
2230// End: shared memory bookkeeping
2231////////////////////////////////////////////////////////////////////////////////////////////////////
2232
2233int os::vm_page_size() {
2234    // Trivial wrapper around os::Aix::page_size(), kept for the shared os:: interface.
2235  assert(os::Aix::page_size() != -1, "must call os::init");
2236  return os::Aix::page_size();
2237}
2238
2239// Aix allocates memory by pages.
2240int os::vm_allocation_granularity() {
2241  assert(os::Aix::page_size() != -1, "must call os::init");
2242  return os::Aix::page_size();
2243}
2244
2245#ifdef PRODUCT
2246static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2247                                    int err) {
2248  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2249          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2250          strerror(err), err);
2251}
2252#endif
2253
2254void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2255                                  const char* mesg) {
2256  assert(mesg != NULL, "mesg must be specified");
2257  if (!pd_commit_memory(addr, size, exec)) {
2258    // Add extra info in product mode for vm_exit_out_of_memory():
2259    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2260    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2261  }
2262}
2263
2264bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2265
2266  assert0(is_aligned_to(addr, os::vm_page_size()));
2267  assert0(is_aligned_to(size, os::vm_page_size()));
2268
2269  vmembk_t* const vmi = vmembk_find(addr);
2270  assert0(vmi);
2271  vmi->assert_is_valid_subrange(addr, size);
2272
2273  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2274
2275  return true;
2276}
2277
2278bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2279  return pd_commit_memory(addr, size, exec);
2280}
2281
2282void os::pd_commit_memory_or_exit(char* addr, size_t size,
2283                                  size_t alignment_hint, bool exec,
2284                                  const char* mesg) {
2285  // Alignment_hint is ignored on this OS.
2286  pd_commit_memory_or_exit(addr, size, exec, mesg);
2287}
2288
2289bool os::pd_uncommit_memory(char* addr, size_t size) {
2290  assert0(is_aligned_to(addr, os::vm_page_size()));
2291  assert0(is_aligned_to(size, os::vm_page_size()));
2292
2293  // Dynamically do different things for mmap/shmat.
2294  const vmembk_t* const vmi = vmembk_find(addr);
2295  assert0(vmi);
2296  vmi->assert_is_valid_subrange(addr, size);
2297
2298  if (vmi->type == VMEM_SHMATED) {
2299    return uncommit_shmated_memory(addr, size);
2300  } else {
2301    return uncommit_mmaped_memory(addr, size);
2302  }
2303}
2304
2305bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2306  // Do not call this; no need to commit stack pages on AIX.
2307  ShouldNotReachHere();
2308  return true;
2309}
2310
2311bool os::remove_stack_guard_pages(char* addr, size_t size) {
2312  // Do not call this; no need to commit stack pages on AIX.
2313  ShouldNotReachHere();
2314  return true;
2315}
2316
2317void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2318}
2319
2320void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2321}
2322
2323void os::numa_make_global(char *addr, size_t bytes) {
2324}
2325
2326void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2327}
2328
2329bool os::numa_topology_changed() {
2330  return false;
2331}
2332
2333size_t os::numa_get_groups_num() {
2334  return 1;
2335}
2336
2337int os::numa_get_group_id() {
2338  return 0;
2339}
2340
2341size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2342  if (size > 0) {
2343    ids[0] = 0;
2344    return 1;
2345  }
2346  return 0;
2347}
2348
2349bool os::get_page_info(char *start, page_info* info) {
2350  return false;
2351}
2352
2353char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2354  return end;
2355}
2356
2357// Reserves and attaches a shared memory segment.
2358  // In debug builds, a non-NULL wish address triggers an assert; in release it is ignored (see below).
2359char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2360
2361  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2362  // thereby clobbering old mappings at that place. That is probably
2363  // not intended, never used and almost certainly an error were it
2364  // ever to be used this way (to attach at a specified address
2365  // without clobbering old mappings, an alternate API exists:
2366  // os::attempt_reserve_memory_at()).
2367  // Instead of mimicking the dangerous coding of the other platforms, here I
2368  // just ignore the request address (release) or assert(debug).
2369  assert0(requested_addr == NULL);
2370
2371  // Always round to os::vm_page_size(), which may be larger than 4K.
2372  bytes = align_size_up(bytes, os::vm_page_size());
2373  const size_t alignment_hint0 =
2374    alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2375
2376  // In 4K mode always use mmap.
2377  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2378  if (os::vm_page_size() == SIZE_4K) {
2379    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2380  } else {
2381    if (bytes >= Use64KPagesThreshold) {
2382      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2383    } else {
2384      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2385    }
2386  }
2387}
2388
2389bool os::pd_release_memory(char* addr, size_t size) {
2390
2391  // Dynamically do different things for mmap/shmat.
2392  vmembk_t* const vmi = vmembk_find(addr);
2393  assert0(vmi);
2394
2395  // Always round to os::vm_page_size(), which may be larger than 4K.
2396  size = align_size_up(size, os::vm_page_size());
2397  addr = (char *)align_ptr_up(addr, os::vm_page_size());
2398
2399  bool rc = false;
2400  bool remove_bookkeeping = false;
2401  if (vmi->type == VMEM_SHMATED) {
2402    // For shmatted memory, we do:
2403    // - If user wants to release the whole range, release the memory (shmdt).
2404    // - If user only wants to release a partial range, uncommit (disclaim) that
2405    //   range. That way, at least, we do not use the memory anymore (but it still
2406    //   occupies page table space).
2407    vmi->assert_is_valid_subrange(addr, size);
2408    if (addr == vmi->addr && size == vmi->size) {
2409      rc = release_shmated_memory(addr, size);
2410      remove_bookkeeping = true;
2411    } else {
2412      rc = uncommit_shmated_memory(addr, size);
2413    }
2414  } else {
2415    // User may unmap partial regions but region has to be fully contained.
2416#ifdef ASSERT
2417    vmi->assert_is_valid_subrange(addr, size);
2418#endif
2419    rc = release_mmaped_memory(addr, size);
2420    remove_bookkeeping = true;
2421  }
2422
2423  // update bookkeeping
2424  if (rc && remove_bookkeeping) {
2425    vmembk_remove(vmi);
2426  }
2427
2428  return rc;
2429}
2430
2431static bool checked_mprotect(char* addr, size_t size, int prot) {
2432
2433  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2434  // not tell me if protection failed when trying to protect an un-protectable range.
2435  //
2436  // This means if the memory was allocated using shmget/shmat, protection won't work
2437  // but mprotect will still return 0:
2438  //
2439  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2440
2441  bool rc = ::mprotect(addr, size, prot) == 0;
2442
2443  if (!rc) {
2444    const char* const s_errno = strerror(errno);
2445    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2446    return false;
2447  }
2448
2449  // mprotect success check
2450  //
2451  // Mprotect said it changed the protection but can I believe it?
2452  //
2453  // To be sure I need to check the protection afterwards. Try to
2454  // read from protected memory and check whether that causes a segfault.
2455  //
2456  if (!os::Aix::xpg_sus_mode()) {
2457
2458    if (CanUseSafeFetch32()) {
2459
2460      const bool read_protected =
2461        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2462         SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2463
2464      if (prot & PROT_READ) {
2465        rc = !read_protected;
2466      } else {
2467        rc = read_protected;
2468      }
2469    }
2470  }
2471  if (!rc) {
2472    assert(false, "mprotect failed.");
2473  }
2474  return rc;
2475}
2476
2477// Set protections specified
2478bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2479  unsigned int p = 0;
2480  switch (prot) {
2481  case MEM_PROT_NONE: p = PROT_NONE; break;
2482  case MEM_PROT_READ: p = PROT_READ; break;
2483  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2484  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2485  default:
2486    ShouldNotReachHere();
2487  }
2488  // is_committed is unused.
2489  return checked_mprotect(addr, size, p);
2490}
2491
2492bool os::guard_memory(char* addr, size_t size) {
2493  return checked_mprotect(addr, size, PROT_NONE);
2494}
2495
2496bool os::unguard_memory(char* addr, size_t size) {
2497  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2498}
2499
2500// Large page support
2501
2502static size_t _large_page_size = 0;
2503
2504// Enable large page support if OS allows that.
2505void os::large_page_init() {
2506  return; // Nothing to do. See query_multipage_support and friends.
2507}
2508
2509char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2510  // "exec" is passed in but not used. Creating the shared image for
2511  // the code cache doesn't have an SHM_X executable permission to check.
2512  Unimplemented();
2513  return 0;
2514}
2515
2516bool os::release_memory_special(char* base, size_t bytes) {
2517  // Detaching the SHM segment will also delete it, see reserve_memory_special().
2518  Unimplemented();
2519  return false;
2520}
2521
2522size_t os::large_page_size() {
2523  return _large_page_size;
2524}
2525
2526bool os::can_commit_large_page_memory() {
2527  // Does not matter, we do not support huge pages.
2528  return false;
2529}
2530
2531bool os::can_execute_large_page_memory() {
2532  // Does not matter, we do not support huge pages.
2533  return false;
2534}
2535
2536// Reserve memory at an arbitrary address, only if that area is
2537// available (and not reserved for something else).
2538char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2539  char* addr = NULL;
2540
2541  // Always round to os::vm_page_size(), which may be larger than 4K.
2542  bytes = align_size_up(bytes, os::vm_page_size());
2543
2544  // In 4K mode always use mmap.
2545  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2546  if (os::vm_page_size() == SIZE_4K) {
2547    return reserve_mmaped_memory(bytes, requested_addr, 0);
2548  } else {
2549    if (bytes >= Use64KPagesThreshold) {
2550      return reserve_shmated_memory(bytes, requested_addr, 0);
2551    } else {
2552      return reserve_mmaped_memory(bytes, requested_addr, 0);
2553    }
2554  }
2555
2556  return addr;
2557}
2558
2559size_t os::read(int fd, void *buf, unsigned int nBytes) {
2560  return ::read(fd, buf, nBytes);
2561}
2562
2563size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2564  return ::pread(fd, buf, nBytes, offset);
2565}
2566
2567void os::naked_short_sleep(jlong ms) {
2568  struct timespec req;
2569
2570  assert(ms < 1000, "Uninterruptible sleep, short time use only");
2571  req.tv_sec = 0;
2572  if (ms > 0) {
2573    req.tv_nsec = (ms % 1000) * 1000000;
2574  } else {
2576    req.tv_nsec = 1;
2577  }
2578
2579  nanosleep(&req, NULL);
2580
2581  return;
2582}
2583
2584// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2585void os::infinite_sleep() {
2586  while (true) {    // sleep forever ...
2587    ::sleep(100);   // ... 100 seconds at a time
2588  }
2589}
2590
2591// Used to convert frequent JVM_Yield() to nops
2592bool os::dont_yield() {
2593  return DontYieldALot;
2594}
2595
2596void os::naked_yield() {
2597  sched_yield();
2598}
2599
2600////////////////////////////////////////////////////////////////////////////////
2601// thread priority support
2602
2603// From AIX manpage to pthread_setschedparam
2604// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2605//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2606//
2607// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2608// range from 40 to 80, where 40 is the least favored priority and 80
2609// is the most favored."
2610//
2611// (Actually, I doubt this even has an impact on AIX, as we do kernel
2612// scheduling there; however, this still leaves iSeries.)
2613//
2614// We use the same values for AIX and PASE.
2615int os::java_to_os_priority[CriticalPriority + 1] = {
2616  54,             // 0 Entry should never be used
2617
2618  55,             // 1 MinPriority
2619  55,             // 2
2620  56,             // 3
2621
2622  56,             // 4
2623  57,             // 5 NormPriority
2624  57,             // 6
2625
2626  58,             // 7
2627  58,             // 8
2628  59,             // 9 NearMaxPriority
2629
2630  60,             // 10 MaxPriority
2631
2632  60              // 11 CriticalPriority
2633};
2634
2635OSReturn os::set_native_priority(Thread* thread, int newpri) {
2636  if (!UseThreadPriorities) return OS_OK;
2637  pthread_t thr = thread->osthread()->pthread_id();
2638  int policy = SCHED_OTHER;
2639  struct sched_param param;
2640  param.sched_priority = newpri;
2641  int ret = pthread_setschedparam(thr, policy, &param);
2642
2643  if (ret != 0) {
2644    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2645        (int)thr, newpri, ret, strerror(ret));
2646  }
2647  return (ret == 0) ? OS_OK : OS_ERR;
2648}
2649
2650OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2651  if (!UseThreadPriorities) {
2652    *priority_ptr = java_to_os_priority[NormPriority];
2653    return OS_OK;
2654  }
2655  pthread_t thr = thread->osthread()->pthread_id();
2656  int policy = SCHED_OTHER;
2657  struct sched_param param;
2658  int ret = pthread_getschedparam(thr, &policy, &param);
2659  *priority_ptr = param.sched_priority;
2660
2661  return (ret == 0) ? OS_OK : OS_ERR;
2662}
2663
2664// Hint to the underlying OS that a task switch would not be good.
2665// Void return because it's a hint and can fail.
2666void os::hint_no_preempt() {}
2667
2668////////////////////////////////////////////////////////////////////////////////
2669// suspend/resume support
2670
2671//  the low-level signal-based suspend/resume support is a remnant from the
2672//  old VM-suspension that used to be for java-suspension, safepoints etc,
2673//  within hotspot. Now there is a single use-case for this:
2674//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2675//      that runs in the watcher thread.
2676//  The remaining code is greatly simplified from the more general suspension
2677//  code that used to be used.
2678//
2679//  The protocol is quite simple:
2680//  - suspend:
2681//      - sends a signal to the target thread
2682//      - polls the suspend state of the osthread using a yield loop
2683//      - target thread signal handler (SR_handler) sets suspend state
2684//        and blocks in sigsuspend until continued
2685//  - resume:
2686//      - sets target osthread state to continue
2687//      - sends signal to end the sigsuspend loop in the SR_handler
2688//
2689//  Note that the SR_lock plays no role in this suspend/resume protocol.
2690//
2691
2692static void resume_clear_context(OSThread *osthread) {
2693  osthread->set_ucontext(NULL);
2694  osthread->set_siginfo(NULL);
2695}
2696
2697static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2698  osthread->set_ucontext(context);
2699  osthread->set_siginfo(siginfo);
2700}
2701
2702//
2703// Handler function invoked when a thread's execution is suspended or
2704// resumed. We have to be careful that only async-safe functions are
2705// called here (Note: most pthread functions are not async safe and
2706// should be avoided.)
2707//
2708// Note: sigwait() is a more natural fit than sigsuspend() from an
2709  // interface point of view, but sigwait() prevents the signal handler
2710  // from being run. libpthread would get very confused by not having
2711  // its signal handlers run and prevents sigwait()'s use with the
2712  // mutex granting signal.
2713//
2714// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2715//
2716static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2717  // Save and restore errno to avoid confusing native code with EINTR
2718  // after sigsuspend.
2719  int old_errno = errno;
2720
2721  Thread* thread = Thread::current();
2722  OSThread* osthread = thread->osthread();
2723  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2724
2725  os::SuspendResume::State current = osthread->sr.state();
2726  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2727    suspend_save_context(osthread, siginfo, context);
2728
2729    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2730    os::SuspendResume::State state = osthread->sr.suspended();
2731    if (state == os::SuspendResume::SR_SUSPENDED) {
2732      sigset_t suspend_set;  // signals for sigsuspend()
2733
2734      // get current set of blocked signals and unblock resume signal
2735      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2736      sigdelset(&suspend_set, SR_signum);
2737
2738      // wait here until we are resumed
2739      while (1) {
2740        sigsuspend(&suspend_set);
2741
2742        os::SuspendResume::State result = osthread->sr.running();
2743        if (result == os::SuspendResume::SR_RUNNING) {
2744          break;
2745        }
2746      }
2747
2748    } else if (state == os::SuspendResume::SR_RUNNING) {
2749      // request was cancelled, continue
2750    } else {
2751      ShouldNotReachHere();
2752    }
2753
2754    resume_clear_context(osthread);
2755  } else if (current == os::SuspendResume::SR_RUNNING) {
2756    // request was cancelled, continue
2757  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2758    // ignore
2759  } else {
2760    ShouldNotReachHere();
2761  }
2762
2763  errno = old_errno;
2764}
2765
2766static int SR_initialize() {
2767  struct sigaction act;
2768  char *s;
2769  // Get signal number to use for suspend/resume
2770  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2771    int sig = ::strtol(s, 0, 10);
2772    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
2773        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
2774      SR_signum = sig;
2775    } else {
2776      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
2777              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
2778    }
2779  }
2780
2781  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2782        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2783
2784  sigemptyset(&SR_sigset);
2785  sigaddset(&SR_sigset, SR_signum);
2786
2787  // Set up signal handler for suspend/resume.
2788  act.sa_flags = SA_RESTART|SA_SIGINFO;
2789  act.sa_handler = (void (*)(int)) SR_handler;
2790
2791  // SR_signum is blocked by default.
2792  // 4528190 - We also need to block pthread restart signal (32 on all
2793  // supported Linux platforms). Note that LinuxThreads need to block
2794  // this signal for all threads to work properly. So we don't have
2795  // to use a hard-coded signal number when setting up the mask.
2796  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2797
2798  if (sigaction(SR_signum, &act, 0) == -1) {
2799    return -1;
2800  }
2801
2802  // Save signal flag
2803  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2804  return 0;
2805}
2806
2807static int SR_finalize() {
2808  return 0;
2809}
2810
2811static int sr_notify(OSThread* osthread) {
2812  int status = pthread_kill(osthread->pthread_id(), SR_signum);
2813  assert_status(status == 0, status, "pthread_kill");
2814  return status;
2815}
2816
2817// "Randomly" selected value for how long we want to spin
2818// before bailing out on suspending a thread, also how often
2819// we send a signal to a thread we want to resume
2820static const int RANDOMLY_LARGE_INTEGER = 1000000;
2821static const int RANDOMLY_LARGE_INTEGER2 = 100;
2822
2823// returns true on success and false on error - really an error is fatal
2824// but this seems the normal response to library errors
2825static bool do_suspend(OSThread* osthread) {
2826  assert(osthread->sr.is_running(), "thread should be running");
2827  // mark as suspended and send signal
2828
2829  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2830    // failed to switch, state wasn't running?
2831    ShouldNotReachHere();
2832    return false;
2833  }
2834
2835  if (sr_notify(osthread) != 0) {
2836    // try to cancel, switch to running
2837
2838    os::SuspendResume::State result = osthread->sr.cancel_suspend();
2839    if (result == os::SuspendResume::SR_RUNNING) {
2840      // cancelled
2841      return false;
2842    } else if (result == os::SuspendResume::SR_SUSPENDED) {
2843      // somehow managed to suspend
2844      return true;
2845    } else {
2846      ShouldNotReachHere();
2847      return false;
2848    }
2849  }
2850
2851  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2852
2853  for (int n = 0; !osthread->sr.is_suspended(); n++) {
2854    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2855      os::naked_yield();
2856    }
2857
2858    // timeout, try to cancel the request
2859    if (n >= RANDOMLY_LARGE_INTEGER) {
2860      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2861      if (cancelled == os::SuspendResume::SR_RUNNING) {
2862        return false;
2863      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2864        return true;
2865      } else {
2866        ShouldNotReachHere();
2867        return false;
2868      }
2869    }
2870  }
2871
2872  guarantee(osthread->sr.is_suspended(), "Must be suspended");
2873  return true;
2874}
2875
2876static void do_resume(OSThread* osthread) {
2877  //assert(osthread->sr.is_suspended(), "thread should be suspended");
2878
2879  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2880    // failed to switch to WAKEUP_REQUEST
2881    ShouldNotReachHere();
2882    return;
2883  }
2884
2885  while (!osthread->sr.is_running()) {
2886    if (sr_notify(osthread) == 0) {
2887      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2888        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2889          os::naked_yield();
2890        }
2891      }
2892    } else {
2893      ShouldNotReachHere();
2894    }
2895  }
2896
2897  guarantee(osthread->sr.is_running(), "Must be running!");
2898}
2899
2900///////////////////////////////////////////////////////////////////////////////////
2901// signal handling (except suspend/resume)
2902
2903// This routine may be used by user applications as a "hook" to catch signals.
2904// The user-defined signal handler must pass unrecognized signals to this
2905// routine, and if it returns true (non-zero), then the signal handler must
2906// return immediately. If the flag "abort_if_unrecognized" is true, then this
2907  // routine will never return false (zero), but instead will execute a VM panic
2908  // routine that kills the process.
2909//
2910// If this routine returns false, it is OK to call it again. This allows
2911// the user-defined signal handler to perform checks either before or after
2912// the VM performs its own checks. Naturally, the user code would be making
2913// a serious error if it tried to handle an exception (such as a null check
2914// or breakpoint) that the VM was generating for its own correct operation.
2915//
2916// This routine may recognize any of the following kinds of signals:
2917//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2918// It should be consulted by handlers for any of those signals.
2919//
2920// The caller of this routine must pass in the three arguments supplied
2921// to the function referred to in the "sa_sigaction" (not the "sa_handler")
2922// field of the structure passed to sigaction(). This routine assumes that
2923// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2924//
2925// Note that the VM will print warnings if it detects conflicting signal
2926// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2927//
2928extern "C" JNIEXPORT int
2929JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
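
// Illustrative sketch of the hook contract described above (hypothetical user
// code, not part of the VM): a user-defined handler first offers the signal to
// the VM and only handles it itself if the VM did not recognize it.
extern "C" void user_chained_handler_sketch(int sig, siginfo_t* info, void* uc) {
  if (JVM_handle_aix_signal(sig, info, uc, 0 /* do not abort if unrecognized */)) {
    return; // the VM recognized and handled the signal
  }
  // ... user-specific handling of the unrecognized signal goes here ...
}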
2930
2931// Set thread signal mask (for some reason on AIX sigthreadmask() seems
2932// to be the thing to call; documentation is not terribly clear about whether
2933  // pthread_sigmask also works, and if it does, whether it does the same.)
2934bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2935  const int rc = ::pthread_sigmask(how, set, oset);
2936  // return value semantics differ slightly for error case:
2937  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
2938  // (so, pthread_sigmask is more thread-safe for error handling).
2939  // But success is always 0.
2940  return rc == 0;
2941}
2942
2943// Function to unblock all signals which are, according
2944// to POSIX, typical program error signals. If they happen while being blocked,
2945// they typically will bring down the process immediately.
2946bool unblock_program_error_signals() {
2947  sigset_t set;
2948  ::sigemptyset(&set);
2949  ::sigaddset(&set, SIGILL);
2950  ::sigaddset(&set, SIGBUS);
2951  ::sigaddset(&set, SIGFPE);
2952  ::sigaddset(&set, SIGSEGV);
2953  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2954}
2955
2956// Renamed from 'signalHandler' to avoid collision with other shared libs.
2957void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2958  assert(info != NULL && uc != NULL, "it must be old kernel");
2959
2960  // Never leave program error signals blocked;
2961  // on all our platforms they would bring down the process immediately when
2962  // getting raised while being blocked.
2963  unblock_program_error_signals();
2964
2965  JVM_handle_aix_signal(sig, info, uc, true);
2966}
2967
2968// This boolean allows users to forward their own non-matching signals
2969// to JVM_handle_aix_signal, harmlessly.
2970bool os::Aix::signal_handlers_are_installed = false;
2971
2972// For signal-chaining
2973struct sigaction sigact[NSIG];
2974sigset_t sigs;
2975bool os::Aix::libjsig_is_loaded = false;
2976typedef struct sigaction *(*get_signal_t)(int);
2977get_signal_t os::Aix::get_signal_action = NULL;
2978
2979struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2980  struct sigaction *actp = NULL;
2981
2982  if (libjsig_is_loaded) {
2983    // Retrieve the old signal handler from libjsig
2984    actp = (*get_signal_action)(sig);
2985  }
2986  if (actp == NULL) {
2987    // Retrieve the preinstalled signal handler from jvm
2988    actp = get_preinstalled_handler(sig);
2989  }
2990
2991  return actp;
2992}
2993
2994static bool call_chained_handler(struct sigaction *actp, int sig,
2995                                 siginfo_t *siginfo, void *context) {
2996  // Call the old signal handler
2997  if (actp->sa_handler == SIG_DFL) {
2998    // It's more reasonable to let jvm treat it as an unexpected exception
2999    // instead of taking the default action.
3000    return false;
3001  } else if (actp->sa_handler != SIG_IGN) {
3002    if ((actp->sa_flags & SA_NODEFER) == 0) {
3003      // automatically block the signal
3004      sigaddset(&(actp->sa_mask), sig);
3005    }
3006
3007    sa_handler_t hand = NULL;
3008    sa_sigaction_t sa = NULL;
3009    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3010    // retrieve the chained handler
3011    if (siginfo_flag_set) {
3012      sa = actp->sa_sigaction;
3013    } else {
3014      hand = actp->sa_handler;
3015    }
3016
3017    if ((actp->sa_flags & SA_RESETHAND) != 0) {
3018      actp->sa_handler = SIG_DFL;
3019    }
3020
3021    // try to honor the signal mask
3022    sigset_t oset;
3023    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3024
3025    // call into the chained handler
3026    if (siginfo_flag_set) {
3027      (*sa)(sig, siginfo, context);
3028    } else {
3029      (*hand)(sig);
3030    }
3031
3032    // restore the signal mask
3033    pthread_sigmask(SIG_SETMASK, &oset, 0);
3034  }
3035  // Tell jvm's signal handler the signal is taken care of.
3036  return true;
3037}
3038
3039bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3040  bool chained = false;
3041  // signal-chaining
3042  if (UseSignalChaining) {
3043    struct sigaction *actp = get_chained_signal_action(sig);
3044    if (actp != NULL) {
3045      chained = call_chained_handler(actp, sig, siginfo, context);
3046    }
3047  }
3048  return chained;
3049}
3050
3051struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3052  if (sigismember(&sigs, sig)) {
3053    return &sigact[sig];
3054  }
3055  return NULL;
3056}
3057
3058void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3059  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3060  sigact[sig] = oldAct;
3061  sigaddset(&sigs, sig);
3062}
3063
3064// for diagnostic
3065int sigflags[NSIG];
3066
3067int os::Aix::get_our_sigflags(int sig) {
3068  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3069  return sigflags[sig];
3070}
3071
3072void os::Aix::set_our_sigflags(int sig, int flags) {
3073  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3074  if (sig > 0 && sig < NSIG) {
3075    sigflags[sig] = flags;
3076  }
3077}
3078
3079void os::Aix::set_signal_handler(int sig, bool set_installed) {
3080  // Check for overwrite.
3081  struct sigaction oldAct;
3082  sigaction(sig, (struct sigaction*)NULL, &oldAct);
3083
3084  void* oldhand = oldAct.sa_sigaction
3085    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3086    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3087  // Renamed 'signalHandler' to avoid collision with other shared libs.
3088  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3089      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3090      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3091    if (AllowUserSignalHandlers || !set_installed) {
3092      // Do not overwrite; user takes responsibility to forward to us.
3093      return;
3094    } else if (UseSignalChaining) {
3095      // save the old handler in jvm
3096      save_preinstalled_handler(sig, oldAct);
3097      // libjsig also interposes the sigaction() call below and saves the
3098      // old sigaction on its own.
3099    } else {
3100      fatal("Encountered unexpected pre-existing sigaction handler "
3101            "%#lx for signal %d.", (long)oldhand, sig);
3102    }
3103  }
3104
3105  struct sigaction sigAct;
3106  sigfillset(&(sigAct.sa_mask));
3107  if (!set_installed) {
3108    sigAct.sa_handler = SIG_DFL;
3109    sigAct.sa_flags = SA_RESTART;
3110  } else {
3111    // Renamed 'signalHandler' to avoid collision with other shared libs.
3112    sigAct.sa_sigaction = javaSignalHandler;
3113    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3114  }
3115  // Save flags, which are set by ours
3116  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3117  sigflags[sig] = sigAct.sa_flags;
3118
3119  int ret = sigaction(sig, &sigAct, &oldAct);
3120  assert(ret == 0, "check");
3121
3122  void* oldhand2 = oldAct.sa_sigaction
3123                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3124                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3125  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3126}
3127
3128// install signal handlers for signals that HotSpot needs to
3129// handle in order to support Java-level exception handling.
3130void os::Aix::install_signal_handlers() {
3131  if (!signal_handlers_are_installed) {
3132    signal_handlers_are_installed = true;
3133
3134    // signal-chaining
3135    typedef void (*signal_setting_t)();
3136    signal_setting_t begin_signal_setting = NULL;
3137    signal_setting_t end_signal_setting = NULL;
3138    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3139                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3140    if (begin_signal_setting != NULL) {
3141      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3142                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3143      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3144                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3145      libjsig_is_loaded = true;
3146      assert(UseSignalChaining, "should enable signal-chaining");
3147    }
3148    if (libjsig_is_loaded) {
3149      // Tell libjsig that the jvm is setting signal handlers.
3150      (*begin_signal_setting)();
3151    }
3152
3153    ::sigemptyset(&sigs);
3154    set_signal_handler(SIGSEGV, true);
3155    set_signal_handler(SIGPIPE, true);
3156    set_signal_handler(SIGBUS, true);
3157    set_signal_handler(SIGILL, true);
3158    set_signal_handler(SIGFPE, true);
3159    set_signal_handler(SIGTRAP, true);
3160    set_signal_handler(SIGXFSZ, true);
3161    set_signal_handler(SIGDANGER, true);
3162
3163    if (libjsig_is_loaded) {
3164      // Tell libjsig that the jvm has finished setting signal handlers.
3165      (*end_signal_setting)();
3166    }
3167
3168  // We do not activate the signal checker if libjsig is in place (we trust ourselves),
3169  // and if AllowUserSignalHandlers is set all bets are off anyway.
3170    // Log that signal checking is off only if -verbose:jni is specified.
3171    if (CheckJNICalls) {
3172      if (libjsig_is_loaded) {
3173        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3174        check_signals = false;
3175      }
3176      if (AllowUserSignalHandlers) {
3177        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3178        check_signals = false;
3179      }
3180      // Need to initialize check_signal_done.
3181      ::sigemptyset(&check_signal_done);
3182    }
3183  }
3184}
3185
3186static const char* get_signal_handler_name(address handler,
3187                                           char* buf, int buflen) {
3188  int offset;
3189  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3190  if (found) {
3191    // skip directory names
3192    const char *p1, *p2;
3193    p1 = buf;
3194    size_t len = strlen(os::file_separator());
3195    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3196    // The way os::dll_address_to_library_name is implemented on Aix
3197    // right now, it always returns -1 for the offset, which is not
3198    // terribly informative.
3199    // Until that is fixed, omit the offset.
3200    jio_snprintf(buf, buflen, "%s", p1);
3201  } else {
3202    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3203  }
3204  return buf;
3205}
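
// Example (illustrative): for a handler located in "/usr/lib/libjsig.so",
// the directory-stripping loop above reduces the buffer to "libjsig.so";
// for an address that cannot be resolved, the raw pointer value is printed.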
3206
3207static void print_signal_handler(outputStream* st, int sig,
3208                                 char* buf, size_t buflen) {
3209  struct sigaction sa;
3210  sigaction(sig, NULL, &sa);
3211
3212  st->print("%s: ", os::exception_name(sig, buf, buflen));
3213
3214  address handler = (sa.sa_flags & SA_SIGINFO)
3215    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3216    : CAST_FROM_FN_PTR(address, sa.sa_handler);
3217
3218  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3219    st->print("SIG_DFL");
3220  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3221    st->print("SIG_IGN");
3222  } else {
3223    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3224  }
3225
3226  // Print readable mask.
3227  st->print(", sa_mask[0]=");
3228  os::Posix::print_signal_set_short(st, &sa.sa_mask);
3229
3230  address rh = VMError::get_resetted_sighandler(sig);
3231  // Maybe the handler was reset by VMError?
3232  if (rh != NULL) {
3233    handler = rh;
3234    sa.sa_flags = VMError::get_resetted_sigflags(sig);
3235  }
3236
3237  // Print textual representation of sa_flags.
3238  st->print(", sa_flags=");
3239  os::Posix::print_sa_flags(st, sa.sa_flags);
3240
3241  // Check: is it our handler?
3242  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3243      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3244    // It is our signal handler.
3245    // Check whether the flags still match the ones we installed.
3246    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3247      st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
3248                os::Aix::get_our_sigflags(sig));
3249    }
3250  }
3251  st->cr();
3252}
3253
3254#define DO_SIGNAL_CHECK(sig) \
3255  if (!sigismember(&check_signal_done, sig)) \
3256    os::Aix::check_signal_handler(sig)
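
// For illustration, DO_SIGNAL_CHECK(SIGSEGV) expands (at the use site) to:
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV);
//
// so once a signal has been flagged (added to check_signal_done), it is
// skipped on all subsequent periodic checks.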
3257
3258// This method is a periodic task to check for misbehaving JNI applications
3259// under CheckJNI; we can add any periodic checks here.
3260
3261void os::run_periodic_checks() {
3262
3263  if (check_signals == false) return;
3264
3265  // If SEGV or BUS are overridden, that could potentially prevent
3266  // generation of the hs*.log in the event of a crash; debugging
3267  // such a case can be very challenging, so for good measure we
3268  // always check the following:
3269  DO_SIGNAL_CHECK(SIGSEGV);
3270  DO_SIGNAL_CHECK(SIGILL);
3271  DO_SIGNAL_CHECK(SIGFPE);
3272  DO_SIGNAL_CHECK(SIGBUS);
3273  DO_SIGNAL_CHECK(SIGPIPE);
3274  DO_SIGNAL_CHECK(SIGXFSZ);
3275  if (UseSIGTRAP) {
3276    DO_SIGNAL_CHECK(SIGTRAP);
3277  }
3278  DO_SIGNAL_CHECK(SIGDANGER);
3279
3280  // ReduceSignalUsage allows the user to override these handlers
3281  // see comments at the very top and jvm_solaris.h
3282  if (!ReduceSignalUsage) {
3283    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3284    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3285    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3286    DO_SIGNAL_CHECK(BREAK_SIGNAL);
3287  }
3288
3289  DO_SIGNAL_CHECK(SR_signum);
3290}
3291
3292typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3293
3294static os_sigaction_t os_sigaction = NULL;
3295
3296void os::Aix::check_signal_handler(int sig) {
3297  char buf[O_BUFLEN];
3298  address jvmHandler = NULL;
3299
3300  struct sigaction act;
3301  if (os_sigaction == NULL) {
3302    // only trust the default sigaction, in case it has been interposed
3303    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3304    if (os_sigaction == NULL) return;
3305  }
3306
3307  os_sigaction(sig, (struct sigaction*)NULL, &act);
3308
3309  address thisHandler = (act.sa_flags & SA_SIGINFO)
3310    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3311    : CAST_FROM_FN_PTR(address, act.sa_handler);
3312
3313  switch(sig) {
3314  case SIGSEGV:
3315  case SIGBUS:
3316  case SIGFPE:
3317  case SIGPIPE:
3318  case SIGILL:
3319  case SIGXFSZ:
3320    // Renamed 'signalHandler' to avoid collision with other shared libs.
3321    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3322    break;
3323
3324  case SHUTDOWN1_SIGNAL:
3325  case SHUTDOWN2_SIGNAL:
3326  case SHUTDOWN3_SIGNAL:
3327  case BREAK_SIGNAL:
3328    jvmHandler = (address)user_handler();
3329    break;
3330
3331  default:
3332    if (sig == SR_signum) {
3333      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3334    } else {
3335      return;
3336    }
3337    break;
3338  }
3339
3340  if (thisHandler != jvmHandler) {
3341    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3342    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3343    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3344    // No need to check this sig any longer
3345    sigaddset(&check_signal_done, sig);
3346    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned to SIG_IGN.
3347    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3348      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3349                    exception_name(sig, buf, O_BUFLEN));
3350    }
3351  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3352    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3353    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3354    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3355    // No need to check this sig any longer
3356    sigaddset(&check_signal_done, sig);
3357  }
3358
3359  // Dump all the signal handlers.
3360  if (sigismember(&check_signal_done, sig)) {
3361    print_signal_handlers(tty, buf, O_BUFLEN);
3362  }
3363}
3364
3365// To install functions for atexit system call
3366extern "C" {
3367  static void perfMemory_exit_helper() {
3368    perfMemory_exit();
3369  }
3370}
3371
3372// This is called _before_ most of the global arguments have been parsed.
3373void os::init(void) {
3374  // This is basic; we want to know if it ever changes.
3375  // (The shared memory boundary is supposed to be 256M aligned.)
3376  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3377
3378  // First off, we need to know whether we run on AIX or PASE, and
3379  // the OS level we run on.
3380  os::Aix::initialize_os_info();
3381
3382  // Scan environment (SPEC1170 behaviour, etc).
3383  os::Aix::scan_environment();
3384
3385  // Check which pages are supported by AIX.
3386  query_multipage_support();
3387
3388  // Act like we only have one page size by eliminating corner cases which
3389  // we did not support very well anyway.
3390  // We have two input conditions:
3391  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3392  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3393  //    setting.
3394  //    Data segment page size is important for us because it defines the thread stack page
3395  //    size, which is needed for guard page handling, stack banging etc.
3396  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3397  //    and should be allocated with 64k pages.
3398  //
3399  // So, we do the following:
3400  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3401  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3402  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3403  // 64k          no              --- AIX 5.2 ? ---
3404  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3405
3406  // We explicitly leave no option to change the page size, because only upgrading would work,
3407  // not downgrading (if the stack page size is 64k you cannot pretend it's 4k).
3408
3409  if (g_multipage_support.datapsize == SIZE_4K) {
3410    // datapsize = 4K. Data segment, thread stacks are 4K paged.
3411    if (g_multipage_support.can_use_64K_pages) {
3412      // .. but we are able to use 64K pages dynamically.
3413      // This would be typical for java launchers which are not linked
3414      // with datapsize=64K (like, any other launcher but our own).
3415      //
3416      // In this case it would be smart to allocate the java heap with 64K
3417      // to get the performance benefit, and to fake 64k pages for the
3418      // data segment (when dealing with thread stacks).
3419      //
3420      // However, leave a possibility to downgrade to 4K, using
3421      // -XX:-Use64KPages.
3422      if (Use64KPages) {
3423        trcVerbose("64K page mode (faked for data segment)");
3424        Aix::_page_size = SIZE_64K;
3425      } else {
3426        trcVerbose("4K page mode (Use64KPages=off)");
3427        Aix::_page_size = SIZE_4K;
3428      }
3429    } else {
3430      // .. and not able to allocate 64k pages dynamically. Here, just
3431      // fall back to 4K paged mode and use mmap for everything.
3432      trcVerbose("4K page mode");
3433      Aix::_page_size = SIZE_4K;
3434      FLAG_SET_ERGO(bool, Use64KPages, false);
3435    }
3436  } else {
3437    // datapsize = 64k. Data segment, thread stacks are 64k paged.
3438    //   This normally means that we can allocate 64k pages dynamically.
3439    //   (There is one special case where this may be false: EXTSHM=on,
3440    //    but we decided not to support that mode.)
3441    assert0(g_multipage_support.can_use_64K_pages);
3442    Aix::_page_size = SIZE_64K;
3443    trcVerbose("64K page mode");
3444    FLAG_SET_ERGO(bool, Use64KPages, true);
3445  }
3446
3447  // For now, hard-wire the stack page size to the base page size; if that works,
3448  // we will just remove the separate stack page size altogether.
3449  Aix::_stack_page_size = Aix::_page_size;
3450
3451  // For now UseLargePages is just ignored.
3452  FLAG_SET_ERGO(bool, UseLargePages, false);
3453  _page_sizes[0] = 0;
3454
3455  // debug trace
3456  trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3457
3458  // Next, we need to initialize the libo4 and libperfstat libraries.
3459  if (os::Aix::on_pase()) {
3460    os::Aix::initialize_libo4();
3461  } else {
3462    os::Aix::initialize_libperfstat();
3463  }
3464
3465  // Reset the perfstat information provided by ODM.
3466  if (os::Aix::on_aix()) {
3467    libperfstat::perfstat_reset();
3468  }
3469
3470  // Now initialize basic system properties. Note that for some of the values we
3471  // need libperfstat etc.
3472  os::Aix::initialize_system_info();
3473
3474  _initial_pid = getpid();
3475
3476  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3477
3478  init_random(1234567);
3479
3480  ThreadCritical::initialize();
3481
3482  // Main_thread points to the aboriginal thread.
3483  Aix::_main_thread = pthread_self();
3484
3485  initial_time_count = os::elapsed_counter();
3486
3487  // If the page size of the VM is greater than 8K, determine the appropriate
3488  // number of initial guard pages. The user can change this with the
3489  // command line arguments, if needed.
3490  if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3491    StackYellowPages = 1;
3492    StackRedPages = 1;
3493    StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3494  }
3495}
3496
3497// This is called _after_ the global arguments have been parsed.
3498jint os::init_2(void) {
3499
3500  trcVerbose("processor count: %d", os::_processor_count);
3501  trcVerbose("physical memory: %lu", Aix::_physical_memory);
3502
3503  // Initially build up the loaded dll map.
3504  LoadedLibraries::reload();
3505
3506  const int page_size = Aix::page_size();
3507  const int map_size = page_size;
3508
3509  address map_address = (address) MAP_FAILED;
3510  const int prot  = PROT_READ;
3511  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3512
3513  // Use optimized addresses for the polling page,
3514  // e.g. map it to a special 32-bit address.
3515  if (OptimizePollingPageLocation) {
3516    // architecture-specific list of address wishes:
3517    address address_wishes[] = {
3518      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3519      // PPC64: all address wishes are non-negative 32 bit values where
3520      // the lower 16 bits are all zero. We can load these addresses
3521      // with a single ppc_lis instruction (e.g. 0x31000000 == 0x3100 << 16).
3522      (address) 0x30000000, (address) 0x31000000,
3523      (address) 0x32000000, (address) 0x33000000,
3524      (address) 0x40000000, (address) 0x41000000,
3525      (address) 0x42000000, (address) 0x43000000,
3526      (address) 0x50000000, (address) 0x51000000,
3527      (address) 0x52000000, (address) 0x53000000,
3528      (address) 0x60000000, (address) 0x61000000,
3529      (address) 0x62000000, (address) 0x63000000
3530    };
3531    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3532
3533    // iterate over the list of address wishes:
3534    for (int i=0; i<address_wishes_length; i++) {
3535      // Try to map with current address wish.
3536      // AIX needs MAP_FIXED if we provide an address, and mmap will
3537      // fail if that address is already mapped.
3538      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3539                                     map_size, prot,
3540                                     flags | MAP_FIXED,
3541                                     -1, 0);
3542      if (Verbose) {
3543        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3544                address_wishes[i], map_address + (ssize_t)page_size);
3545      }
3546
3547      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3548        // Map succeeded and map_address is at wished address, exit loop.
3549        break;
3550      }
3551
3552      if (map_address != (address) MAP_FAILED) {
3553        // Map succeeded, but polling_page is not at wished address, unmap and continue.
3554        ::munmap(map_address, map_size);
3555        map_address = (address) MAP_FAILED;
3556      }
3557      // Map failed, continue loop.
3558    }
3559  } // end OptimizePollingPageLocation
3560
3561  if (map_address == (address) MAP_FAILED) {
3562    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3563  }
3564  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3565  os::set_polling_page(map_address);
3566
3567  if (!UseMembar) {
3568    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3569    guarantee(mem_serialize_page != (address) MAP_FAILED, "mmap failed for memory serialize page");
3570    os::set_memory_serialize_page(mem_serialize_page);
3571
3572#ifndef PRODUCT
3573    if (Verbose && PrintMiscellaneous) {
3574      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3575    }
3576#endif
3577  }
3578
3579  // initialize suspend/resume support - must do this before signal_sets_init()
3580  if (SR_initialize() != 0) {
3581    perror("SR_initialize failed");
3582    return JNI_ERR;
3583  }
3584
3585  Aix::signal_sets_init();
3586  Aix::install_signal_handlers();
3587
3588  // Check minimum allowable stack size for thread creation and to initialize
3589  // the java system classes, including StackOverflowError - depends on page
3590  // size. Add a page for compiler2 recursion in main thread.
3591  // Add in 2*BytesPerWord times page size to account for VM stack during
3592  // class initialization depending on 32 or 64 bit VM.
3593  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3594            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3595                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3596
3597  os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3598
3599  size_t threadStackSizeInBytes = ThreadStackSize * K;
3600  if (threadStackSizeInBytes != 0 &&
3601      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3602    tty->print_cr("\nThe stack size specified is too small; "
3603                  "specify at least %dk",
3604                  os::Aix::min_stack_allowed / K);
3605    return JNI_ERR;
3606  }
3607
3608  // Make the stack size a multiple of the page size so that
3609  // the yellow/red zones can be guarded.
3610  // Note that this can be 0, if no default stacksize was set.
3611  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3612
3613  Aix::libpthread_init();
3614
3615  if (MaxFDLimit) {
3616    // Set the number of file descriptors to the maximum. Print an error
3617    // if getrlimit/setrlimit fails, but continue regardless.
3618    struct rlimit nbr_files;
3619    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3620    if (status != 0) {
3621      if (PrintMiscellaneous && (Verbose || WizardMode))
3622        perror("os::init_2 getrlimit failed");
3623    } else {
3624      nbr_files.rlim_cur = nbr_files.rlim_max;
3625      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3626      if (status != 0) {
3627        if (PrintMiscellaneous && (Verbose || WizardMode))
3628          perror("os::init_2 setrlimit failed");
3629      }
3630    }
3631  }
3632
3633  if (PerfAllowAtExitRegistration) {
3634    // Only register atexit functions if PerfAllowAtExitRegistration is set.
3635    // Atexit functions can be delayed until process exit time, which
3636    // can be problematic for embedded VM situations. Embedded VMs should
3637    // call DestroyJavaVM() to assure that VM resources are released.
3638
3639    // Note: perfMemory_exit_helper atexit function may be removed in
3640    // the future if the appropriate cleanup code can be added to the
3641    // VM_Exit VMOperation's doit method.
3642    if (atexit(perfMemory_exit_helper) != 0) {
3643      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3644    }
3645  }
3646
3647  return JNI_OK;
3648}
3649
3650// Mark the polling page as unreadable
3651void os::make_polling_page_unreadable(void) {
3652  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3653    fatal("Could not disable polling page");
3654  }
3655}
3656
3657// Mark the polling page as readable
3658void os::make_polling_page_readable(void) {
3659  // Changed according to os_linux.cpp.
3660  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3661    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3662  }
3663}
3664
3665int os::active_processor_count() {
3666  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3667  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3668  return online_cpus;
3669}
3670
3671void os::set_native_thread_name(const char *name) {
3672  // Not yet implemented.
3673  return;
3674}
3675
3676bool os::distribute_processes(uint length, uint* distribution) {
3677  // Not yet implemented.
3678  return false;
3679}
3680
3681bool os::bind_to_processor(uint processor_id) {
3682  // Not yet implemented.
3683  return false;
3684}
3685
3686void os::SuspendedThreadTask::internal_do_task() {
3687  if (do_suspend(_thread->osthread())) {
3688    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3689    do_task(context);
3690    do_resume(_thread->osthread());
3691  }
3692}
3693
3694class PcFetcher : public os::SuspendedThreadTask {
3695public:
3696  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3697  ExtendedPC result();
3698protected:
3699  void do_task(const os::SuspendedThreadTaskContext& context);
3700private:
3701  ExtendedPC _epc;
3702};
3703
3704ExtendedPC PcFetcher::result() {
3705  guarantee(is_done(), "task is not done yet.");
3706  return _epc;
3707}
3708
3709void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3710  Thread* thread = context.thread();
3711  OSThread* osthread = thread->osthread();
3712  if (osthread->ucontext() != NULL) {
3713    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3714  } else {
3715    // NULL context is unexpected, double-check this is the VMThread.
3716    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3717  }
3718}
3719
3720// Suspends the target using the signal mechanism and then grabs the PC before
3721// resuming the target. Used by the flat-profiler only.
3722ExtendedPC os::get_thread_pc(Thread* thread) {
3723  // Make sure that it is called by the watcher for the VMThread.
3724  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3725  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3726
3727  PcFetcher fetcher(thread);
3728  fetcher.run();
3729  return fetcher.result();
3730}
3731
3732////////////////////////////////////////////////////////////////////////////////
3733// debug support
3734
3735static address same_page(address x, address y) {
3736  intptr_t page_bits = -os::vm_page_size();
3737  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3738    return x;
3739  else if (x > y)
3740    return (address)(intptr_t(y) | ~page_bits) + 1;
3741  else
3742    return (address)(intptr_t(y) & page_bits);
3743}
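
// Worked example (assuming a 4K page size, so page_bits masks off the low
// 12 bits):
//   same_page((address)0x2ABC, (address)0x2345) returns 0x2ABC (same page);
//   same_page((address)0x5000, (address)0x2345) returns 0x3000 (one past the
//                                               end of y's page);
//   same_page((address)0x1000, (address)0x2345) returns 0x2000 (start of
//                                               y's page).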
3744
3745bool os::find(address addr, outputStream* st) {
3746
3747  st->print(PTR_FORMAT ": ", addr);
3748
3749  loaded_module_t lm;
3750  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3751      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3752    st->print("%s", lm.path);
3753    return true;
3754  }
3755
3756  return false;
3757}
3758
3759////////////////////////////////////////////////////////////////////////////////
3760// misc
3761
3762// This does not do anything on Aix. This is basically a hook for being
3763// able to use structured exception handling (thread-local exception filters)
3764// on, e.g., Win32.
3765void
3766os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3767                         JavaCallArguments* args, Thread* thread) {
3768  f(value, method, args, thread);
3769}
3770
3771void os::print_statistics() {
3772}
3773
3774bool os::message_box(const char* title, const char* message) {
3775  int i;
3776  fdStream err(defaultStream::error_fd());
3777  for (i = 0; i < 78; i++) err.print_raw("=");
3778  err.cr();
3779  err.print_raw_cr(title);
3780  for (i = 0; i < 78; i++) err.print_raw("-");
3781  err.cr();
3782  err.print_raw_cr(message);
3783  for (i = 0; i < 78; i++) err.print_raw("=");
3784  err.cr();
3785
3786  char buf[16];
3787  // Prevent process from exiting upon "read error" without consuming all CPU
3788  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3789
3790  return buf[0] == 'y' || buf[0] == 'Y';
3791}
3792
3793int os::stat(const char *path, struct stat *sbuf) {
3794  char pathbuf[MAX_PATH];
3795  if (strlen(path) > MAX_PATH - 1) {
3796    errno = ENAMETOOLONG;
3797    return -1;
3798  }
3799  os::native_path(strcpy(pathbuf, path));
3800  return ::stat(pathbuf, sbuf);
3801}
3802
3803bool os::check_heap(bool force) {
3804  return true;
3805}
3806
3807// Is a (classpath) directory empty?
3808bool os::dir_is_empty(const char* path) {
3809  DIR *dir = NULL;
3810  struct dirent *ptr;
3811
3812  dir = opendir(path);
3813  if (dir == NULL) return true;
3814
3815  /* Scan the directory */
3816  bool result = true;
3817  char buf[sizeof(struct dirent) + MAX_PATH];
3818  while (result && (ptr = ::readdir(dir)) != NULL) {
3819    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3820      result = false;
3821    }
3822  }
3823  closedir(dir);
3824  return result;
3825}
3826
3827// This code originates from JDK's sysOpen and open64_w
3828// from src/solaris/hpi/src/system_md.c
3829
3830int os::open(const char *path, int oflag, int mode) {
3831
3832  if (strlen(path) > MAX_PATH - 1) {
3833    errno = ENAMETOOLONG;
3834    return -1;
3835  }
3836  int fd;
3837
3838  fd = ::open64(path, oflag, mode);
3839  if (fd == -1) return -1;
3840
3841  // If the open succeeded, the file might still be a directory.
3842  {
3843    struct stat64 buf64;
3844    int ret = ::fstat64(fd, &buf64);
3845    int st_mode = buf64.st_mode;
3846
3847    if (ret != -1) {
3848      if ((st_mode & S_IFMT) == S_IFDIR) {
3849        errno = EISDIR;
3850        ::close(fd);
3851        return -1;
3852      }
3853    } else {
3854      ::close(fd);
3855      return -1;
3856    }
3857  }
3858
3859  // All file descriptors that are opened in the JVM and not
3860  // specifically destined for a subprocess should have the
3861  // close-on-exec flag set. If we don't set it, then careless 3rd
3862  // party native code might fork and exec without closing all
3863  // appropriate file descriptors (e.g. as we do in closeDescriptors in
3864  // UNIXProcess.c), and this in turn might:
3865  //
3866  // - cause end-of-file to fail to be detected on some file
3867  //   descriptors, resulting in mysterious hangs, or
3868  //
3869  // - might cause an fopen in the subprocess to fail on a system
3870  //   suffering from bug 1085341.
3871  //
3872  // (Yes, the default setting of the close-on-exec flag is a Unix
3873  // design flaw.)
3874  //
3875  // See:
3876  // 1085341: 32-bit stdio routines should support file descriptors >255
3877  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3878  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3879#ifdef FD_CLOEXEC
3880  {
3881    int flags = ::fcntl(fd, F_GETFD);
3882    if (flags != -1)
3883      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3884  }
3885#endif
3886
3887  return fd;
3888}
3889
3890// create binary file, rewriting existing file if required
3891int os::create_binary_file(const char* path, bool rewrite_existing) {
3892  int oflags = O_WRONLY | O_CREAT;
3893  if (!rewrite_existing) {
3894    oflags |= O_EXCL;
3895  }
3896  return ::open64(path, oflags, S_IREAD | S_IWRITE);
3897}
3898
3899// return current position of file pointer
3900jlong os::current_file_offset(int fd) {
3901  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3902}
3903
3904// move file pointer to the specified offset
3905jlong os::seek_to_file_offset(int fd, jlong offset) {
3906  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3907}
3908
3909// This code originates from JDK's sysAvailable
3910// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3911
3912int os::available(int fd, jlong *bytes) {
3913  jlong cur, end;
3914  int mode;
3915  struct stat64 buf64;
3916
3917  if (::fstat64(fd, &buf64) >= 0) {
3918    mode = buf64.st_mode;
3919    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3920      int n;
3921      if (::ioctl(fd, FIONREAD, &n) >= 0) {
3922        *bytes = n;
3923        return 1;
3924      }
3925    }
3926  }
3927  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3928    return 0;
3929  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3930    return 0;
3931  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3932    return 0;
3933  }
3934  *bytes = end - cur;
3935  return 1;
3936}
3937
3938// Map a block of memory.
3939char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3940                        char *addr, size_t bytes, bool read_only,
3941                        bool allow_exec) {
3942  int prot;
3943  int flags = MAP_PRIVATE;
3944
3945  if (read_only) {
3946    prot = PROT_READ;
3947    flags = MAP_SHARED;
3948  } else {
3949    prot = PROT_READ | PROT_WRITE;
3950    flags = MAP_PRIVATE;
3951  }
3952
3953  if (allow_exec) {
3954    prot |= PROT_EXEC;
3955  }
3956
3957  if (addr != NULL) {
3958    flags |= MAP_FIXED;
3959  }
3960
3961  // Allow anonymous mappings if 'fd' is -1.
3962  if (fd == -1) {
3963    flags |= MAP_ANONYMOUS;
3964  }
3965
3966  char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3967                                     fd, file_offset);
3968  if (mapped_address == MAP_FAILED) {
3969    return NULL;
3970  }
3971  return mapped_address;
3972}
3973
3974// Remap a block of memory.
3975char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3976                          char *addr, size_t bytes, bool read_only,
3977                          bool allow_exec) {
3978  // same as map_memory() on this OS
3979  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3980                        allow_exec);
3981}
3982
3983// Unmap a block of memory.
3984bool os::pd_unmap_memory(char* addr, size_t bytes) {
3985  return munmap(addr, bytes) == 0;
3986}
3987
3988// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3989// are used by JVM M&M and JVMTI to get user+sys or user CPU time
3990// of a thread.
3991//
3992// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3993// the fast estimate available on the platform.
3994
3995jlong os::current_thread_cpu_time() {
3996  // return user + sys since the cost is the same
3997  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3998  assert(n >= 0, "negative CPU time");
3999  return n;
4000}
4001
4002jlong os::thread_cpu_time(Thread* thread) {
4003  // consistent with what current_thread_cpu_time() returns
4004  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4005  assert(n >= 0, "negative CPU time");
4006  return n;
4007}
4008
4009jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4010  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4011  assert(n >= 0, "negative CPU time");
4012  return n;
4013}
4014
4015static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4016  bool error = false;
4017
4018  jlong sys_time = 0;
4019  jlong user_time = 0;
4020
4021  // Reimplemented using getthrds64().
4022  //
4023  // Works like this:
4024  // For the thread in question, get the kernel thread id. Then get the
4025  // kernel thread statistics using that id.
4026  //
4027  // Of course, this only works when no pthread scheduling is used,
4028  // i.e. there is a 1:1 relationship between pthreads and kernel threads.
4029  // On AIX, see AIXTHREAD_SCOPE variable.
4030
4031  pthread_t pthtid = thread->osthread()->pthread_id();
4032
4033  // retrieve kernel thread id for the pthread:
4034  tid64_t tid = 0;
4035  struct __pthrdsinfo pinfo;
4036  // I just love those otherworldly IBM APIs which force me to hand down
4037  // dummy buffers for stuff I don't care about...
4038  char dummy[1];
4039  int dummy_size = sizeof(dummy);
4040  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4041                          dummy, &dummy_size) == 0) {
4042    tid = pinfo.__pi_tid;
4043  } else {
4044    tty->print_cr("pthread_getthrds_np failed.");
4045    error = true;
4046  }
4047
4048  // retrieve kernel timing info for that kernel thread
4049  if (!error) {
4050    struct thrdentry64 thrdentry;
4051    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4052      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4053      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4054    } else {
4055      tty->print_cr("getthrds64 failed.");
4056      error = true;
4057    }
4058  }
4059
4060  if (p_sys_time) {
4061    *p_sys_time = sys_time;
4062  }
4063
4064  if (p_user_time) {
4065    *p_user_time = user_time;
4066  }
4067
4068  if (error) {
4069    return false;
4070  }
4071
4072  return true;
4073}
4074
4075jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4076  jlong sys_time;
4077  jlong user_time;
4078
4079  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4080    return -1;
4081  }
4082
4083  return user_sys_cpu_time ? sys_time + user_time : user_time;
4084}
4085
4086void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4087  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4088  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4089  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4090  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4091}
4092
4093void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4094  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4095  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4096  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4097  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4098}
4099
4100bool os::is_thread_cpu_time_supported() {
4101  return true;
4102}
4103
4104// System loadavg support. Returns -1 if load average cannot be obtained.
4105// For now just return the system wide load average (no processor sets).
4106int os::loadavg(double values[], int nelem) {
4107
4108  // Implemented using libperfstat on AIX.
4109
4110  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4111  guarantee(values, "argument error");
4112
4113  if (os::Aix::on_pase()) {
4114    Unimplemented();
4115    return -1;
4116  } else {
4117    // AIX: use libperfstat
4118    //
4119    // See also:
4120    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4121    // /usr/include/libperfstat.h:
4122
4123    // Use get_cpuinfo, which is already AIX-version independent.
4124    os::Aix::cpuinfo_t ci;
4125    if (os::Aix::get_cpuinfo(&ci)) {
4126      for (int i = 0; i < nelem; i++) {
4127        values[i] = ci.loadavg[i];
4128      }
4129    } else {
4130      return -1;
4131    }
4132    return nelem;
4133  }
4134}
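
// Usage sketch (illustrative only; not called anywhere in this file):
//
//   double avg[3];
//   if (os::loadavg(avg, 3) == 3) {
//     tty->print_cr("load averages: %3.2f %3.2f %3.2f", avg[0], avg[1], avg[2]);
//   }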
4135
4136void os::pause() {
4137  char filename[MAX_PATH];
4138  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4139    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4140  } else {
4141    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4142  }
4143
4144  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4145  if (fd != -1) {
4146    struct stat buf;
4147    ::close(fd);
4148    while (::stat(filename, &buf) == 0) {
4149      (void)::poll(NULL, 0, 100);
4150    }
4151  } else {
4152    jio_fprintf(stderr,
4153      "Could not open pause file '%s', continuing immediately.\n", filename);
4154  }
4155}
4156
4157bool os::Aix::is_primordial_thread() {
4158  // On AIX, the primordial thread has pthread id 1.
4159  return pthread_self() == (pthread_t)1;
4160}
4164
4165// OS recognition (PASE/AIX, OS level). Call this before calling any
4166// of the Aix::on_pase(), Aix::os_version() statics.
4167void os::Aix::initialize_os_info() {
4168
4169  assert(_on_pase == -1 && _os_version == -1, "already called.");
4170
4171  struct utsname uts;
4172  memset(&uts, 0, sizeof(uts));
4173  strcpy(uts.sysname, "?");
4174  if (::uname(&uts) == -1) {
4175    trc("uname failed (%d)", errno);
4176    guarantee(0, "Could not determine whether we run on AIX or PASE");
4177  } else {
4178    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4179               "node \"%s\" machine \"%s\"\n",
4180               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4181    const int major = atoi(uts.version);
4182    assert(major > 0, "invalid OS version");
4183    const int minor = atoi(uts.release);
4184    assert(minor > 0, "invalid OS release");
4185    _os_version = (major << 8) | minor;
4186    if (strcmp(uts.sysname, "OS400") == 0) {
4187      Unimplemented();
4188    } else if (strcmp(uts.sysname, "AIX") == 0) {
4189      // We run on AIX. We do not support versions older than AIX 5.3.
4190      _on_pase = 0;
4191      if (_os_version < 0x0503) {
4192        trc("AIX release older than AIX 5.3 not supported.");
4193        assert(false, "AIX release too old.");
4194      } else {
4195        trcVerbose("We run on AIX %d.%d\n", major, minor);
4196      }
4197    } else {
4198      assert(false, "unknown OS");
4199    }
4200  }
4201
4202  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4203} // end: os::Aix::initialize_os_info()
4204
4205// Scan environment for important settings which might affect the VM.
4206// Trace out settings. Warn about invalid settings and/or correct them.
4207//
4208// Must run after os::Aix::initialize_os_info().
4209void os::Aix::scan_environment() {
4210
4211  char* p;
4212  int rc;
4213
4214  // Warn explicitly if EXTSHM=ON is used. That switch changes how
4215  // System V shared memory behaves. One effect is that the page size of
4216  // shared memory cannot be changed dynamically, effectively preventing
4217  // large pages from working.
4218  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4219  // recommendation is (in OSS notes) to switch it off.
4220  p = ::getenv("EXTSHM");
4221  if (Verbose) {
4222    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4223  }
4224  if (p && strcasecmp(p, "ON") == 0) {
4225    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4226    _extshm = 1;
4227  } else {
4228    _extshm = 0;
4229  }
4230
4231  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4232  // Not tested, not supported.
4233  //
4234  // Note that it might be worth the trouble to test and to require it, if only to
4235  // get useful return codes for mprotect.
4236  //
4237  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4238  // exec() ? before loading the libjvm ? ....)
4239  p = ::getenv("XPG_SUS_ENV");
4240  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4241  if (p && strcmp(p, "ON") == 0) {
4242    _xpg_sus_mode = 1;
4243    trc("Unsupported setting: XPG_SUS_ENV=ON");
4244    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4245    // clobber address ranges. If we ever want to support that, we have to do some
4246    // testing first.
4247    guarantee(false, "XPG_SUS_ENV=ON not supported");
4248  } else {
4249    _xpg_sus_mode = 0;
4250  }
4251
4252  // Switch off AIX internal (pthread) guard pages. This has
4253  // immediate effect for any pthread_create calls which follow.
4254  p = ::getenv("AIXTHREAD_GUARDPAGES");
4255  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4256  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4257  guarantee(rc == 0, "");
4258
4259} // end: os::Aix::scan_environment()
4260
4261// PASE: initialize the libo4 library (AS400 PASE porting library).
4262void os::Aix::initialize_libo4() {
4263  Unimplemented();
4264}
4265
4266// AIX: initialize the libperfstat library (we load this dynamically
4267// because it is only available on AIX).
4268void os::Aix::initialize_libperfstat() {
4269
4270  assert(os::Aix::on_aix(), "AIX only");
4271
4272  if (!libperfstat::init()) {
4273    trc("libperfstat initialization failed.");
4274    assert(false, "libperfstat initialization failed");
4275  } else {
4276    if (Verbose) {
4277      fprintf(stderr, "libperfstat initialized.\n");
4278    }
4279  }
4280} // end: os::Aix::initialize_libperfstat
4281
4282/////////////////////////////////////////////////////////////////////////////
4283// thread stack
4284
4285// Function to query the current stack size using pthread_getthrds_np.
4286static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4287  // This only works when invoked on a pthread. As we agreed not to use
4288  // primordial threads anyway, I assert here.
4289  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4290
4291  // Information about this api can be found (a) in the pthread.h header and
4292  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4293  //
4294  // The use of this API to find out the current stack is kind of undefined.
4295  // But after a lot of tries and asking IBM about it, I concluded that it is safe
4296  // enough for cases where I let the pthread library create its stacks. For cases
4297  // where I create my own stack and pass it to pthread_create, it does not seem
4298  // to work (the returned stack size in that case is 0).
4299
4300  pthread_t tid = pthread_self();
4301  struct __pthrdsinfo pinfo;
4302  char dummy[1]; // We only need this to satisfy the api and to not get E.
4303  int dummy_size = sizeof(dummy);
4304
4305  memset(&pinfo, 0, sizeof(pinfo));
4306
4307  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4308                                     sizeof(pinfo), dummy, &dummy_size);
4309
4310  if (rc != 0) {
4311    assert0(false);
4312    trcVerbose("pthread_getthrds_np failed (%d)", rc);
4313    return false;
4314  }
4315  guarantee0(pinfo.__pi_stackend);
4316
4317  // The following can happen when invoking pthread_getthrds_np on a pthread running
4318  // on a user provided stack (when handing down a stack to pthread create, see
4319  // pthread_attr_setstackaddr).
4320  // Not sure what to do here - I feel inclined to forbid this use case completely.
4321  guarantee0(pinfo.__pi_stacksize);
4322
4323  // Note: the pthread stack on AIX seems to look like this:
4324  //
4325  // ---------------------   real base ? at page border ?
4326  //
4327  //     pthread internal data, like ~2K, see also
4328  //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
4329  //
4330  // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
4331  //
4332  //     stack
4333  //      ....
4334  //
4335  //     stack
4336  //
4337  // ---------------------   __pi_stackend  - __pi_stacksize
4338  //
4339  //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
4340  // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
4341  //
4342  //   AIX guard pages (?)
4343  //
4344
4345  // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
4346  // __pi_stackend however is almost never page aligned.
4347  //
4348
4349  if (p_stack_base) {
4350    (*p_stack_base) = (address) (pinfo.__pi_stackend);
4351  }
4352
4353  if (p_stack_size) {
4354    (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
4355  }
4356
4357  return true;
4358}
4359
4360// Get the current stack base from the OS (actually, the pthread library).
4361address os::current_stack_base() {
4362  address p;
4363  query_stack_dimensions(&p, 0);
4364  return p;
4365}
4366
4367// Get the current stack size from the OS (actually, the pthread library).
4368size_t os::current_stack_size() {
4369  size_t s;
4370  query_stack_dimensions(0, &s);
4371  return s;
4372}
4373
4374// Refer to the comments in os_solaris.cpp park-unpark.
4375//
4376// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4377// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4378// For specifics regarding the bug see GLIBC BUGID 261237 :
4379//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4380// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4381// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4382// is used. (The simple C test-case provided in the GLIBC bug report manifests the
4383// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4384// and monitorenter when we're using 1-0 locking. All those operations may result in
4385// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4386// of libpthread avoids the problem, but isn't practical.
4387//
4388// Possible remedies:
4389//
4390// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4391//      This is palliative and probabilistic, however. If the thread is preempted
4392//      between the call to compute_abstime() and pthread_cond_timedwait(), more
4393//      than the minimum period may have passed, and the abstime may be stale (in the
4394//      past) resulting in a hang. Using this technique reduces the odds of a hang
4395//      but the JVM is still vulnerable, particularly on heavily loaded systems.
4396//
4397// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4398//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4399//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4400//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4401//      thread.
4402//
4403// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4404//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4405//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4406//      This also works well. In fact it avoids kernel-level scalability impediments
4407//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4408//      timers in a graceful fashion.
4409//
4410// 4.   When the abstime value is in the past it appears that control returns
4411//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4412//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4413//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4414//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4415//      It may be possible to avoid reinitialization by checking the return
4416//      value from pthread_cond_timedwait(). In addition to reinitializing the
4417//      condvar we must establish the invariant that cond_signal() is only called
4418//      within critical sections protected by the adjunct mutex. This prevents
4419//      cond_signal() from "seeing" a condvar that's in the midst of being
4420//      reinitialized or that is corrupt. Sadly, this invariant obviates the
4421//      desirable signal-after-unlock optimization that avoids futile context switching.
4422//
4423//      I'm also concerned that some versions of NPTL might allocate an auxiliary
4424//      structure when a condvar is used or initialized. cond_destroy() would
4425//      release the helper structure. Our reinitialize-after-timedwait fix
4426//      would put excessive stress on malloc/free and the locks protecting the C-heap.
4427//
4428// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
4429// It may be possible to refine (4) by checking the kernel and NPTL versions
4430// and only enabling the work-around for vulnerable environments.
4431
4432// utility to compute the abstime argument to timedwait:
4433// millis is the relative timeout time
4434// abstime will be the absolute timeout time
4435// TODO: replace compute_abstime() with unpackTime()
4436
4437static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4438  if (millis < 0) millis = 0;
4439  struct timeval now;
4440  int status = gettimeofday(&now, NULL);
4441  assert(status == 0, "gettimeofday");
4442  jlong seconds = millis / 1000;
4443  millis %= 1000;
4444  if (seconds > 50000000) { // see man cond_timedwait(3T)
4445    seconds = 50000000;
4446  }
4447  abstime->tv_sec = now.tv_sec  + seconds;
4448  long       usec = now.tv_usec + millis * 1000;
4449  if (usec >= 1000000) {
4450    abstime->tv_sec += 1;
4451    usec -= 1000000;
4452  }
4453  abstime->tv_nsec = usec * 1000;
4454  return abstime;
4455}
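
// Worked example: with now = { tv_sec = 100, tv_usec = 900000 } and
// millis = 1500, we get seconds = 1 with 500 ms remaining, so
// usec = 900000 + 500 * 1000 = 1400000 >= 1000000; after the carry the
// result is abstime = { tv_sec = 102, tv_nsec = 400000000 }.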
4456
4457// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4458// Conceptually TryPark() should be equivalent to park(0).
4459
4460int os::PlatformEvent::TryPark() {
4461  for (;;) {
4462    const int v = _Event;
4463    guarantee ((v == 0) || (v == 1), "invariant");
4464    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4465  }
4466}
4467
4468void os::PlatformEvent::park() {       // AKA "down()"
4469  // Invariant: Only the thread associated with the Event/PlatformEvent
4470  // may call park().
4471  // TODO: assert that _Assoc != NULL or _Assoc == Self
4472  int v;
4473  for (;;) {
4474    v = _Event;
4475    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4476  }
4477  guarantee (v >= 0, "invariant");
4478  if (v == 0) {
4479    // Do this the hard way by blocking ...
4480    int status = pthread_mutex_lock(_mutex);
4481    assert_status(status == 0, status, "mutex_lock");
4482    guarantee (_nParked == 0, "invariant");
4483    ++ _nParked;
4484    while (_Event < 0) {
4485      status = pthread_cond_wait(_cond, _mutex);
4486      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_wait");
4487    }
4488    -- _nParked;
4489
4490    // In theory we could move the ST of 0 into _Event past the unlock(),
4491    // but then we'd need a MEMBAR after the ST.
4492    _Event = 0;
4493    status = pthread_mutex_unlock(_mutex);
4494    assert_status(status == 0, status, "mutex_unlock");
4495  }
4496  guarantee (_Event >= 0, "invariant");
4497}
4498
4499int os::PlatformEvent::park(jlong millis) {
4500  guarantee (_nParked == 0, "invariant");
4501
4502  int v;
4503  for (;;) {
4504    v = _Event;
4505    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4506  }
4507  guarantee (v >= 0, "invariant");
4508  if (v != 0) return OS_OK;
4509
4510  // We do this the hard way, by blocking the thread.
4511  // Consider enforcing a minimum timeout value.
4512  struct timespec abst;
4513  compute_abstime(&abst, millis);
4514
4515  int ret = OS_TIMEOUT;
4516  int status = pthread_mutex_lock(_mutex);
4517  assert_status(status == 0, status, "mutex_lock");
4518  guarantee (_nParked == 0, "invariant");
4519  ++_nParked;
4520
4521  // Object.wait(timo) will return because of
4522  // (a) notification
4523  // (b) timeout
4524  // (c) thread.interrupt
4525  //
4526  // Thread.interrupt and object.notify{All} both call Event::set.
4527  // That is, we treat thread.interrupt as a special case of notification.
4528  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4529  // We assume all ETIME returns are valid.
4530  //
4531  // TODO: properly differentiate simultaneous notify+interrupt.
4532  // In that case, we should propagate the notify to another waiter.
4533
4534  while (_Event < 0) {
4535    status = pthread_cond_timedwait(_cond, _mutex, &abst);
4536    assert_status(status == 0 || status == ETIMEDOUT,
4537                  status, "cond_timedwait");
4538    if (!FilterSpuriousWakeups) break;         // previous semantics
4539    if (status == ETIMEDOUT) break;
4540    // We consume and ignore EINTR and spurious wakeups.
4541  }
4542  --_nParked;
4543  if (_Event >= 0) {
4544     ret = OS_OK;
4545  }
4546  _Event = 0;
4547  status = pthread_mutex_unlock(_mutex);
4548  assert_status(status == 0, status, "mutex_unlock");
4549  assert (_nParked == 0, "invariant");
4550  return ret;
4551}
4552
4553void os::PlatformEvent::unpark() {
4554  int v, AnyWaiters;
4555  for (;;) {
4556    v = _Event;
4557    if (v > 0) {
4558      // The LD of _Event could have reordered or be satisfied
4559      // by a read-aside from this processor's write buffer.
4560      // To avoid problems execute a barrier and then
4561      // ratify the value.
4562      OrderAccess::fence();
4563      if (_Event == v) return;
4564      continue;
4565    }
4566    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4567  }
4568  if (v < 0) {
4569    // Wait for the thread associated with the event to vacate
4570    int status = pthread_mutex_lock(_mutex);
4571    assert_status(status == 0, status, "mutex_lock");
4572    AnyWaiters = _nParked;
4573
4574    if (AnyWaiters != 0) {
4575      // Note: we signal while still holding the lock here; on AIX the
4576      // mutex should be locked for pthread_cond_signal (see below).
4577      status = pthread_cond_signal(_cond);
4578      assert_status(status == 0, status, "cond_signal");
4579    }
4580    // Mutex should be locked for pthread_cond_signal(_cond).
4581    status = pthread_mutex_unlock(_mutex);
4582    assert_status(status == 0, status, "mutex_unlock");
4583  }
4584
4585  // Note that unlike implementations that signal *after* dropping the lock,
4586  // we signal while holding it (see above). In rare circumstances a thread
4587  // may still return prematurely from cond_{timed}wait(), but the spurious
4588  // wakeup is benign and the victim will simply re-test the condition and
4589  // re-park itself.
4590}
4591
4592
4593// JSR166
4594// -------------------------------------------------------
4595
4596//
4597// The solaris and linux implementations of park/unpark are fairly
4598// conservative for now, but can be improved. They currently use a
4599// mutex/condvar pair, plus a count.
4600// Park decrements count if > 0, else does a condvar wait. Unpark
4601// sets count to 1 and signals condvar. Only one thread ever waits
4602// on the condvar. Contention seen when trying to park implies that someone
4603// is unparking you, so don't wait. And spurious returns are fine, so there
4604// is no need to track notifications.
4605//
4606
4607#define MAX_SECS 100000000
4608//
4609// This code is common to linux and solaris and will be moved to a
4610// common place in dolphin.
4611//
4612// The passed in time value is either a relative time in nanoseconds
4613// or an absolute time in milliseconds. Either way it has to be unpacked
4614// into suitable seconds and nanoseconds components and stored in the
4615// given timespec structure.
4616// Since the given time is a 64-bit value and the time_t used in the timespec
4617// is only a signed 32-bit value (except on 64-bit Linux), we have to watch
4618// for overflow if times far in the future are given. Further, on Solaris
4619// versions prior to 10 there is a restriction (see cond_timedwait) that the
4620// specified number of seconds, in abstime, be less than current_time +
4621// 100,000,000. As it will be 28 years before "now + 100000000" overflows, we
4622// can ignore that and just impose a hard limit on the seconds using the
4623// value of "now + 100,000,000". This places a limit on the timeout of about
4624// 3.17 years from "now".
4625//
4626
4627static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4628  assert (time > 0, "unpackTime");
4629
4630  struct timeval now;
4631  int status = gettimeofday(&now, NULL);
4632  assert(status == 0, "gettimeofday");
4633
4634  time_t max_secs = now.tv_sec + MAX_SECS;
4635
4636  if (isAbsolute) {
4637    jlong secs = time / 1000;
4638    if (secs > max_secs) {
4639      absTime->tv_sec = max_secs;
4640    }
4641    else {
4642      absTime->tv_sec = secs;
4643    }
4644    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4645  }
4646  else {
4647    jlong secs = time / NANOSECS_PER_SEC;
4648    if (secs >= MAX_SECS) {
4649      absTime->tv_sec = max_secs;
4650      absTime->tv_nsec = 0;
4651    }
4652    else {
4653      absTime->tv_sec = now.tv_sec + secs;
4654      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4655      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4656        absTime->tv_nsec -= NANOSECS_PER_SEC;
4657        ++absTime->tv_sec; // note: this must be <= max_secs
4658      }
4659    }
4660  }
4661  assert(absTime->tv_sec >= 0, "tv_sec < 0");
4662  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4663  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4664  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4665}
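
// Worked example of the relative branch above (arbitrary values): for
// time = 2,500,000,000 ns and now = { tv_sec = 1000, tv_usec = 250000 }:
//   secs             = 2,500,000,000 / 1,000,000,000 = 2
//   absTime->tv_sec  = 1000 + 2 = 1002
//   absTime->tv_nsec = 500,000,000 + 250,000 * 1000 = 750,000,000
// Since tv_nsec < NANOSECS_PER_SEC, no carry into tv_sec is needed.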
4666
4667void Parker::park(bool isAbsolute, jlong time) {
4668  // Optional fast-path check:
4669  // Return immediately if a permit is available.
4670  if (_counter > 0) {
4671    _counter = 0;
4672    OrderAccess::fence();
4673    return;
4674  }
4675
4676  Thread* thread = Thread::current();
4677  assert(thread->is_Java_thread(), "Must be JavaThread");
4678  JavaThread *jt = (JavaThread *)thread;
4679
4680  // Optional optimization -- avoid state transitions if there's an
4681  // interrupt pending; check for an interrupt before trying to wait.
4682  if (Thread::is_interrupted(thread, false)) {
4683    return;
4684  }
4685
4686  // Next, demultiplex/decode time arguments
4687  timespec absTime;
4688  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4689    return;
4690  }
4691  if (time > 0) {
4692    unpackTime(&absTime, isAbsolute, time);
4693  }
4694
4695  // Enter safepoint region
4696  // Beware of deadlocks such as 6317397.
4697  // The per-thread Parker:: mutex is a classic leaf-lock.
4698  // In particular, a thread must never block on the Threads_lock while
4699  // holding the Parker:: mutex. If safepoints are pending, both the
4700  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4701  ThreadBlockInVM tbivm(jt);
4702
4703  // Don't wait if we cannot get the lock, since interference arises from
4704  // unblocking. Also, check for an interrupt before trying to wait.
4705  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4706    return;
4707  }
4708
4709  int status;
4710  if (_counter > 0) { // no wait needed
4711    _counter = 0;
4712    status = pthread_mutex_unlock(_mutex);
4713    assert (status == 0, "invariant");
4714    OrderAccess::fence();
4715    return;
4716  }
4717
4718#ifdef ASSERT
4719  // Don't catch signals while blocked; let the running threads have the signals.
4720  // (This allows a debugger to break into the running thread.)
4721  sigset_t oldsigs;
4722  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4723  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4724#endif
4725
4726  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4727  jt->set_suspend_equivalent();
4728  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4729
4730  if (time == 0) {
4731    status = pthread_cond_wait (_cond, _mutex);
4732  } else {
4733    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4734    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4735      pthread_cond_destroy (_cond);
4736      pthread_cond_init    (_cond, NULL);
4737    }
4738  }
4739  assert_status(status == 0 || status == EINTR ||
4740                status == ETIME || status == ETIMEDOUT,
4741                status, "cond_timedwait");
4742
4743#ifdef ASSERT
4744  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4745#endif
4746
4747  _counter = 0;
4748  status = pthread_mutex_unlock(_mutex);
4749  assert_status(status == 0, status, "invariant");
4750  // If externally suspended while waiting, re-suspend
4751  if (jt->handle_special_suspend_equivalent_condition()) {
4752    jt->java_suspend_self();
4753  }
4754
4755  OrderAccess::fence();
4756}
4757
4758void Parker::unpark() {
4759  int s, status;
4760  status = pthread_mutex_lock(_mutex);
4761  assert (status == 0, "invariant");
4762  s = _counter;
4763  _counter = 1;
4764  if (s < 1) {
4765    if (WorkAroundNPTLTimedWaitHang) {
4766      status = pthread_cond_signal (_cond);
4767      assert (status == 0, "invariant");
4768      status = pthread_mutex_unlock(_mutex);
4769      assert (status == 0, "invariant");
4770    } else {
4771      status = pthread_mutex_unlock(_mutex);
4772      assert (status == 0, "invariant");
4773      status = pthread_cond_signal (_cond);
4774      assert (status == 0, "invariant");
4775    }
4776  } else {
4777    pthread_mutex_unlock(_mutex);
4778    assert (status == 0, "invariant");
4779  }
4780}
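
// Hypothetical, compiled-out illustration of the permit semantics
// implemented by park()/unpark() above: unpark() stores at most one
// permit, and a pending permit lets park() return without blocking.
#if 0
static void parker_demo(Parker* p) {
  p->unpark();                     // make a permit available
  p->unpark();                     // idempotent: still only one permit
  p->park(false, 0);               // consumes the permit, returns at once
  p->park(false, 1000000000LL);    // no permit: blocks ~1s (relative nanos)
                                   // unless unparked or interrupted
}
#endif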
4781
4782extern char** environ;
4783
4784// Run the specified command in a separate process. Return its exit value,
4785// or -1 on failure (e.g. can't fork a new process).
4786// Unlike system(), this function can be called from signal handler. It
4787// doesn't block SIGINT et al.
4788int os::fork_and_exec(char* cmd) {
4789  char * argv[4] = {"sh", "-c", cmd, NULL};
4790
4791  pid_t pid = fork();
4792
4793  if (pid < 0) {
4794    // fork failed
4795    return -1;
4796
4797  } else if (pid == 0) {
4798    // child process
4799
4800    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4801    execve("/usr/bin/sh", argv, environ);
4802
4803    // execve failed
4804    _exit(-1);
4805
4806  } else {
4807    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4808    // care about the actual exit code, for now.
4809
4810    int status;
4811
4812    // Wait for the child process to exit. This returns immediately if
4813    // the child has already exited.
4814    while (waitpid(pid, &status, 0) < 0) {
4815      switch (errno) {
4816        case ECHILD: return 0;
4817        case EINTR: break;
4818        default: return -1;
4819      }
4820    }
4821
4822    if (WIFEXITED(status)) {
4823      // The child exited normally; get its exit code.
4824      return WEXITSTATUS(status);
4825    } else if (WIFSIGNALED(status)) {
4826      // The child exited because of a signal.
4827      // The best value to return is 0x80 + signal number,
4828      // because that is what all Unix shells do, and because
4829      // it allows callers to distinguish between process exit and
4830      // process death by signal (e.g. 0x80 + 9 = 137 for SIGKILL).
4831      return 0x80 + WTERMSIG(status);
4832    } else {
4833      // Unknown exit code; pass it through.
4834      return status;
4835    }
4836  }
4837  return -1;
4838}
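
// Hypothetical, compiled-out usage sketch showing how the return value of
// os::fork_and_exec() above maps onto the outcomes documented in its
// header comment.
#if 0
static void fork_and_exec_demo() {
  char cmd[] = "ls /tmp > /dev/null";
  int rc = os::fork_and_exec(cmd);
  if (rc == -1) {
    // fork() or execve() failed.
  } else if (rc >= 0x80) {
    // Most likely the child died from signal rc - 0x80 (e.g. 137 for
    // SIGKILL), though a command could also exit() with such a code.
  } else {
    // Normal termination: rc is the command's exit code.
  }
}
#endif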
4839
4840// is_headless_jre()
4841//
4842// Test for the existence of xawt/libmawt.so or libawt_xawt.so
4843// in order to report if we are running in a headless jre.
4844//
4845// Since JDK8, xawt/libmawt.so has moved into the same directory
4846// as libawt.so and been renamed libawt_xawt.so.
4847bool os::is_headless_jre() {
4848  struct stat statbuf;
4849  char buf[MAXPATHLEN];
4850  char libmawtpath[MAXPATHLEN];
4851  const char *xawtstr = "/xawt/libmawt.so";
4852  const char *new_xawtstr = "/libawt_xawt.so";
4853
4854  char *p;
4855
4856  // Get path to libjvm.so
4857  os::jvm_path(buf, sizeof(buf));
4858
4859  // Get rid of libjvm.so
4860  p = strrchr(buf, '/');
4861  if (p == NULL) return false;
4862  else *p = '\0';
4863
4864  // Get rid of client or server
4865  p = strrchr(buf, '/');
4866  if (p == NULL) return false;
4867  else *p = '\0';
4868
4869  // check xawt/libmawt.so
4870  strcpy(libmawtpath, buf);
4871  strcat(libmawtpath, xawtstr);
4872  if (::stat(libmawtpath, &statbuf) == 0) return false;
4873
4874  // check libawt_xawt.so
4875  strcpy(libmawtpath, buf);
4876  strcat(libmawtpath, new_xawtstr);
4877  if (::stat(libmawtpath, &statbuf) == 0) return false;
4878
4879  return true;
4880}
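
// Worked example (hypothetical layout): if os::jvm_path() yields
// ".../jre/lib/ppc64/server/libjvm.so", stripping the last two path
// components leaves ".../jre/lib/ppc64", so the probes above test
// ".../jre/lib/ppc64/xawt/libmawt.so" and ".../jre/lib/ppc64/libawt_xawt.so";
// the JRE is reported headless only if neither file exists.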
4881
4882// Get the default path to the core file
4883// Returns the length of the string
4884int os::get_core_path(char* buffer, size_t bufferSize) {
4885  const char* p = get_current_directory(buffer, bufferSize);
4886
4887  if (p == NULL) {
4888    assert(p != NULL, "failed to get current directory");
4889    return 0;
4890  }
4891
4892  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4893                                               p, current_process_id());
4894
4895  return strlen(buffer);
4896}
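
// Example (hypothetical values): with current directory "/home/user" and
// pid 4711, the buffer is filled with "/home/user/core or core.4711" and
// its length is returned.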
4897
4898#ifndef PRODUCT
4899void TestReserveMemorySpecial_test() {
4900  // No tests available for this platform
4901}
4902#endif
4903
4904bool os::start_debugging(char *buf, int buflen) {
4905  int len = (int)strlen(buf);
4906  char *p = &buf[len];
4907
4908  jio_snprintf(p, buflen - len,
4909                 "\n\n"
4910                 "Do you want to debug the problem?\n\n"
4911                 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4912                 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4913                 "Otherwise, press RETURN to abort...",
4914                 os::current_process_id(),
4915                 os::current_thread_id(), thread_self());
4916
4917  bool yes = os::message_box("Unexpected Error", buf);
4918
4919  if (yes) {
4920    // yes, user asked VM to launch debugger
4921    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4922
4923    os::fork_and_exec(buf);
4924    yes = false;
4925  }
4926  return yes;
4927}
4928