os_aix.cpp revision 11236:aa11081b8307
/*
 * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS doc, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libo4.hpp"
#include "libodm_aix.hpp"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "misc_aix.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Missing prototypes for various system APIs.
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

#if !defined(_AIXVERSION_610)
extern "C" int getthrds64(pid_t, struct thrdentry64*, int, tid64_t*, int);
extern "C" int getprocs64(procentry64*, int, fdsinfo*, int, pid_t*, int);
extern "C" int getargs   (procsinfo*, int, char*, int);
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_support.error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
static address resolve_function_descriptor_to_code_pointer(address p);

static void vmembk_print_on(outputStream* os);

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;

pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;

// -1 = uninitialized, 0 if AIX, 1 if OS/400 PASE
int       os::Aix::_on_pase = -1;

// 0 = uninitialized, otherwise 32 bit number:
//  0xVVRRTTSS
//  VV - major version
//  RR - minor version
//  TT - tech level, if known, 0 otherwise
//  SS - service pack, if known, 0 otherwise
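// Example (illustrative): a value of 0x07010204 would decode as
// AIX 7.1, technology level 2, service pack 4.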
uint32_t  os::Aix::_os_version = 0;

int       os::Aix::_stack_page_size = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_xpg_sus_mode = -1;

// -1 = uninitialized, 0 - no, 1 - yes
int       os::Aix::_extshm = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// Process break recorded at startup.
static address g_brk_at_startup = NULL;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in the AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16M, 16G. The
// latter two (16M "large" and 16G "huge" pages) require special
// setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - program text (code)
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found in the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
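// Example (illustrative): starting the JVM with
//   LDR_CNTRL=DATAPSIZE=64K@TEXTPSIZE=64K@SHMPSIZE=64K java ...
// would request 64K pages for data, text and shared memory (LDR_CNTRL
// suboptions are separated by '@'); the probing code below then reports
// which of these the OS actually granted.
//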
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing, which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
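// Example (illustrative): with MaxExpectedDataSegmentSize at, say, 8G, any
// wish address inside [g_brk_at_startup, g_brk_at_startup + 8G) is treated
// as too close to the BRK and rejected, leaving the data segment room to grow.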
static bool is_close_to_brk(address a) {
  assert0(g_brk_at_startup != NULL);
  if (a >= g_brk_at_startup &&
      a < (g_brk_at_startup + MaxExpectedDataSegmentSize)) {
    return true;
  }
  return false;
}

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  // Avoid expensive API call here, as the returned value would always be 0 anyway.
  if (os::Aix::on_pase()) {
    return 0x0LL;
  }
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return ULONG_MAX;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

// Return true if the process runs with special (setuid/setgid) privileges,
// i.e. the real and effective user or group IDs differ.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x40000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
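  // Worked example (illustrative): for size = 2.5G and maxDisclaimSize = 1G,
  // this yields numFullDisclaimsNeeded = 2 and lastDisclaimSize = 0.5G, i.e.
  // two full 1G disclaims followed by one trailing 0.5G disclaim.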

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trcVerbose("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}

// CPU architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif

// Wrap the function "vmgetinfo" which is not available on older OS releases.
static int checked_vmgetinfo(void *out, int command, int arg) {
  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    guarantee(false, "cannot call vmgetinfo on AS/400 older than V6R1");
  }
  return ::vmgetinfo(out, command, arg);
}

// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  if (os::Aix::on_pase() && os::Aix::os_version_short() < 0x0601) {
    // AS/400 older than V6R1: no vmgetinfo here, default to 4K
    return SIZE_4K;
  }

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (checked_vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }
}

void os::Aix::initialize_system_info() {

  // Get the number of online (logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is defined either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // the default should be 4K.
  {
    void* p = ::malloc(SIZE_16M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  // Note that this is pure curiosity. We do not rely on the default page size
  // but set our own page size after allocation.
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size. Should be the same as the data page size because
  // pthread stacks are allocated from the C-Heap.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  {
    address any_function =
      resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    trcVerbose("OS/400 < V6R1 - no large page support.");
    g_multipage_support.error = ERROR_MP_OS_TOO_OLD;
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = checked_vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trcVerbose("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)", errno);
      trcVerbose("disabling multipage support.");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i ++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i ++) {
      const size_t pagesize = sizes[i];
      if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set pagesize.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        trcVerbose("shmctl(SHM_PAGESIZE) failed with errno=%d", en);
      } else {
        // Attach and double-check page size.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == SIZE_64K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == SIZE_16M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s",
      (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s",
      (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
  trcVerbose("Multipage error details: %d",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == SIZE_4K);
  assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
  assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);

}

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/lib:/usr/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /libjvm.so.
    }
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow the debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {
    // On PASE, use the libo4 porting library.

    unsigned long long virt_total = 0;
    unsigned long long real_total = 0;
    unsigned long long real_free = 0;
    unsigned long long pgsp_total = 0;
    unsigned long long pgsp_free = 0;
    if (libo4::get_memory_info(&virt_total, &real_total, &real_free, &pgsp_total, &pgsp_free)) {
      pmi->virt_total = virt_total;
      pmi->real_total = real_total;
      pmi->real_free = real_free;
      pmi->pgsp_total = pgsp_total;
      pmi->pgsp_free = pgsp_free;
      return true;
    }
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset(&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      trcVerbose("perfstat_memory_total() failed (errno=%d)", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *thread_native_entry(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // Normally, pthread stacks on AIX live in the data segment (are allocated with malloc()
  // by the pthread library). In rare cases, this may not be the case, e.g. when third-party
  // tools hook pthread_create(). In this case, we may run into problems establishing
  // guard pages on those stacks, because the stacks may reside in memory which is not
  // protectable (shmated).
  if (thread->stack_base() > ::sbrk(0)) {
    log_warning(os, thread)("Thread stack not in data segment.");
  }

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthread = thread->osthread();

  // Thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep the kernel thread id too, for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // Initialize signal mask for this thread.
  os::Aix::hotspot_sigmask(thread);

  // Initialize floating point control register.
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // Call one more level start routine.
  thread->run();

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Set the correct thread type.
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (stack_size == 0) {
    stack_size = os::Aix::default_stack_size(thr_type);

    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
      assert(JavaThread::stack_size_at_create() > 0, "this should be set");
      stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
  pthread_attr_setstacksize(&attr, stack_size);

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) thread_native_entry, thread);

  char buf[64];
  if (ret == 0) {
    log_info(os, thread)("Thread started (pthread id: " UINTX_FORMAT ", attributes: %s). ",
      (uintx) tid, os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  } else {
    log_warning(os, thread)("Failed to start thread - pthread_create failed (%d=%s) for attributes: %s.",
      ret, os::errno_name(ret), os::Posix::describe_pthread_attr(buf, sizeof(buf), &attr));
  }

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  const pthread_t pthread_id = ::pthread_self();
  const tid_t kernel_thread_id = ::thread_self();

  // OSThread::thread_id is the pthread id.
  osthread->set_thread_id(pthread_id);

  // .. but keep the kernel thread id too, for diagnostics
  osthread->set_kernel_thread_id(kernel_thread_id);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ", kernel thread id: " UINTX_FORMAT ").",
    os::current_thread_id(), (uintx) kernel_thread_id);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "pthread_continue_np failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  // Restore caller's signal mask
  sigset_t sigmask = osthread->caller_sigmask();
  pthread_sigmask(SIG_SETMASK, &sigmask, NULL);

  delete osthread;
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {

    timeval time;
    int status = gettimeofday(&time, NULL);
    assert(status != -1, "PASE error at gettimeofday()");
    jlong usecs = jlong((unsigned long long) time.tv_sec * (1000 * 1000) + time.tv_usec);
    return 1000 * usecs;

  } else {
    // On AIX, use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We'd better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle a partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle a partially initialized VM.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = os::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() {
  return (intx)pthread_self();
}

int os::current_process_id() {
  return getpid();
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.
  loaded_module_t lm;
  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else if (LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
    return lm.is_in_vm;
  } else {
    return false;
  }

}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
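// For background: an AIX function descriptor is a triple of pointers
// { code entry point, TOC anchor, environment pointer }, so reading its
// first slot - which is what FunctionDescriptor::entry() below does -
// yields the real code address.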
1298static address resolve_function_descriptor_to_code_pointer(address p) {
1299
1300  if (LoadedLibraries::find_for_text_address(p, NULL) != NULL) {
1301    // It is a real code pointer.
1302    return p;
1303  } else if (LoadedLibraries::find_for_data_address(p, NULL) != NULL) {
1304    // Pointer to data segment, potential function descriptor.
1305    address code_entry = (address)(((FunctionDescriptor*)p)->entry());
1306    if (LoadedLibraries::find_for_text_address(code_entry, NULL) != NULL) {
1307      // It is a function descriptor.
1308      return code_entry;
1309    }
1310  }
1311
1312  return NULL;
1313}
1314
1315bool os::dll_address_to_function_name(address addr, char *buf,
1316                                      int buflen, int *offset,
1317                                      bool demangle) {
1318  if (offset) {
1319    *offset = -1;
1320  }
1321  // Buf is not optional, but offset is optional.
1322  assert(buf != NULL, "sanity check");
1323  buf[0] = '\0';
1324
1325  // Resolve function ptr literals first.
1326  addr = resolve_function_descriptor_to_code_pointer(addr);
1327  if (!addr) {
1328    return false;
1329  }
1330
1331  return AixSymbols::get_function_name(addr, buf, buflen, offset, NULL, demangle);
1332}
1333
1334bool os::dll_address_to_library_name(address addr, char* buf,
1335                                     int buflen, int* offset) {
1336  if (offset) {
1337    *offset = -1;
1338  }
1339  // Buf is not optional, but offset is optional.
1340  assert(buf != NULL, "sanity check");
1341  buf[0] = '\0';
1342
1343  // Resolve function ptr literals first.
1344  addr = resolve_function_descriptor_to_code_pointer(addr);
1345  if (!addr) {
1346    return false;
1347  }
1348
1349  return AixSymbols::get_module_name(addr, buf, buflen);
1350}
1351
1352// Loads .dll/.so and in case of error it checks if .dll/.so was built
1353// for the same architecture as Hotspot is running on.
1354void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1355
1356  if (ebuf && ebuflen > 0) {
1357    ebuf[0] = '\0';
1358    ebuf[ebuflen - 1] = '\0';
1359  }
1360
1361  if (!filename || strlen(filename) == 0) {
1362    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1363    return NULL;
1364  }
1365
1366  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1367  void * result= ::dlopen(filename, RTLD_LAZY);
1368  if (result != NULL) {
1369    // Reload dll cache. Don't do this in signal handling.
1370    LoadedLibraries::reload();
1371    return result;
1372  } else {
1373    // error analysis when dlopen fails
1374    const char* const error_report = ::dlerror();
1375    if (error_report && ebuf && ebuflen > 0) {
1376      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1377               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1378    }
1379  }
1380  return NULL;
1381}
1382
1383void* os::dll_lookup(void* handle, const char* name) {
1384  void* res = dlsym(handle, name);
1385  return res;
1386}
1387
1388void* os::get_default_process_handle() {
1389  return (void*)::dlopen(NULL, RTLD_LAZY);
1390}
1391
1392void os::print_dll_info(outputStream *st) {
1393  st->print_cr("Dynamic libraries:");
1394  LoadedLibraries::print(st);
1395}
1396
1397void os::get_summary_os_info(char* buf, size_t buflen) {
1398  // There might be something more readable than uname results for AIX.
1399  struct utsname name;
1400  uname(&name);
1401  snprintf(buf, buflen, "%s %s", name.release, name.version);
1402}
1403
1404void os::print_os_info(outputStream* st) {
1405  st->print("OS:");
1406
1407  st->print("uname:");
1408  struct utsname name;
1409  uname(&name);
1410  st->print(name.sysname); st->print(" ");
1411  st->print(name.nodename); st->print(" ");
1412  st->print(name.release); st->print(" ");
1413  st->print(name.version); st->print(" ");
1414  st->print(name.machine);
1415  st->cr();
1416
1417  uint32_t ver = os::Aix::os_version();
1418  st->print_cr("AIX kernel version %u.%u.%u.%u",
1419               (ver >> 24) & 0xFF, (ver >> 16) & 0xFF, (ver >> 8) & 0xFF, ver & 0xFF);
1420
1421  // rlimit
1422  st->print("rlimit:");
1423  struct rlimit rlim;
1424
1425  st->print(" STACK ");
1426  getrlimit(RLIMIT_STACK, &rlim);
1427  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1428  else st->print("%uk", rlim.rlim_cur >> 10);
1429
1430  st->print(", CORE ");
1431  getrlimit(RLIMIT_CORE, &rlim);
1432  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1433  else st->print("%uk", rlim.rlim_cur >> 10);
1434
1435  st->print(", NPROC ");
1436  st->print("%d", sysconf(_SC_CHILD_MAX));
1437
1438  st->print(", NOFILE ");
1439  getrlimit(RLIMIT_NOFILE, &rlim);
1440  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1441  else st->print("%d", rlim.rlim_cur);
1442
1443  st->print(", AS ");
1444  getrlimit(RLIMIT_AS, &rlim);
1445  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1446  else st->print("%uk", rlim.rlim_cur >> 10);
1447
1448  // Print limits on DATA, because it limits the C-heap.
1449  st->print(", DATA ");
1450  getrlimit(RLIMIT_DATA, &rlim);
1451  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1452  else st->print("%uk", rlim.rlim_cur >> 10);
1453  st->cr();
1454
1455  // load average
1456  st->print("load average:");
1457  double loadavg[3] = {-1.L, -1.L, -1.L};
1458  os::loadavg(loadavg, 3);
1459  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1460  st->cr();
1461
1462  // print wpar info
1463  libperfstat::wparinfo_t wi;
1464  if (libperfstat::get_wparinfo(&wi)) {
1465    st->print_cr("wpar info");
1466    st->print_cr("name: %s", wi.name);
1467    st->print_cr("id:   %d", wi.wpar_id);
1468    st->print_cr("type: %s", (wi.app_wpar ? "application" : "system"));
1469  }
1470
1471  // print partition info
1472  libperfstat::partitioninfo_t pi;
1473  if (libperfstat::get_partitioninfo(&pi)) {
1474    st->print_cr("partition info");
1475    st->print_cr(" name: %s", pi.name);
1476  }
1477
1478}
1479
1480void os::print_memory_info(outputStream* st) {
1481
1482  st->print_cr("Memory:");
1483
1484  st->print_cr("  Base page size (sysconf _SC_PAGESIZE):  %s",
1485    describe_pagesize(g_multipage_support.pagesize));
1486  st->print_cr("  Data page size (C-Heap, bss, etc):      %s",
1487    describe_pagesize(g_multipage_support.datapsize));
1488  st->print_cr("  Text page size:                         %s",
1489    describe_pagesize(g_multipage_support.textpsize));
1490  st->print_cr("  Thread stack page size (pthread):       %s",
1491    describe_pagesize(g_multipage_support.pthr_stack_pagesize));
1492  st->print_cr("  Default shared memory page size:        %s",
1493    describe_pagesize(g_multipage_support.shmpsize));
1494  st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1495    (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1496  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1497    (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1498  st->print_cr("  Multipage error: %d",
1499    g_multipage_support.error);
1500  st->cr();
1501  st->print_cr("  os::vm_page_size:       %s", describe_pagesize(os::vm_page_size()));
1502  // not used in OpenJDK st->print_cr("  os::stack_page_size:    %s", describe_pagesize(os::stack_page_size()));
1503
1504  // print out LDR_CNTRL because it affects the default page sizes
1505  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1506  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1507
1508  // Print out EXTSHM because it is an unsupported setting.
1509  const char* const extshm = ::getenv("EXTSHM");
1510  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1511  if ( (strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0) ) {
1512    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1513  }
1514
1515  // Print out AIXTHREAD_GUARDPAGES because it affects the size of pthread stacks.
1516  const char* const aixthread_guardpages = ::getenv("AIXTHREAD_GUARDPAGES");
1517  st->print_cr("  AIXTHREAD_GUARDPAGES=%s.",
1518      aixthread_guardpages ? aixthread_guardpages : "<unset>");
1519
1520  os::Aix::meminfo_t mi;
1521  if (os::Aix::get_meminfo(&mi)) {
1522    char buffer[256];
1523    if (os::Aix::on_aix()) {
1524      st->print_cr("physical total : " SIZE_FORMAT, mi.real_total);
1525      st->print_cr("physical free  : " SIZE_FORMAT, mi.real_free);
1526      st->print_cr("swap total     : " SIZE_FORMAT, mi.pgsp_total);
1527      st->print_cr("swap free      : " SIZE_FORMAT, mi.pgsp_free);
1528    } else {
1529      // PASE - Numbers are result of QWCRSSTS; they mean:
1530      // real_total: Sum of all system pools
1531      // real_free: always 0
1532      // pgsp_total: we take the size of the system ASP
1533      // pgsp_free: size of system ASP times percentage of system ASP unused
1534      st->print_cr("physical total     : " SIZE_FORMAT, mi.real_total);
1535      st->print_cr("system asp total   : " SIZE_FORMAT, mi.pgsp_total);
1536      st->print_cr("%% system asp used : " SIZE_FORMAT,
1537        mi.pgsp_total ? (100.0f * (mi.pgsp_total - mi.pgsp_free) / mi.pgsp_total) : -1.0f);
1538    }
1540  }
1541  st->cr();
1542
1543  // Print segments allocated with os::reserve_memory.
1544  st->print_cr("internal virtual memory regions used by vm:");
1545  vmembk_print_on(st);
1546}
1547
1548// Get a string for the cpuinfo that is a summary of the cpu type
1549void os::get_summary_cpu_info(char* buf, size_t buflen) {
1550  // Use the cpu description from libperfstat if available.
1551  libperfstat::cpuinfo_t ci;
1552  if (libperfstat::get_cpuinfo(&ci)) {
1553    strncpy(buf, ci.version, buflen);
1554  } else {
1555    strncpy(buf, "AIX", buflen);
1556  }
1557}
1558
1559void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1560  st->print("CPU:");
1561  st->print("total %d", os::processor_count());
1562  // It's not safe to query number of active processors after crash.
1563  // st->print("(active %d)", os::active_processor_count());
1564  st->print(" %s", VM_Version::features());
1565  st->cr();
1566}
1567
1568static void print_signal_handler(outputStream* st, int sig,
1569                                 char* buf, size_t buflen);
1570
1571void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1572  st->print_cr("Signal Handlers:");
1573  print_signal_handler(st, SIGSEGV, buf, buflen);
1574  print_signal_handler(st, SIGBUS , buf, buflen);
1575  print_signal_handler(st, SIGFPE , buf, buflen);
1576  print_signal_handler(st, SIGPIPE, buf, buflen);
1577  print_signal_handler(st, SIGXFSZ, buf, buflen);
1578  print_signal_handler(st, SIGILL , buf, buflen);
1579  print_signal_handler(st, SR_signum, buf, buflen);
1580  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1581  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1582  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1583  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1584  print_signal_handler(st, SIGTRAP, buf, buflen);
1585  print_signal_handler(st, SIGDANGER, buf, buflen);
1586}
1587
1588static char saved_jvm_path[MAXPATHLEN] = {0};
1589
1590// Find the full path to the current module, libjvm.so.
1591void os::jvm_path(char *buf, jint buflen) {
1592  // Error checking.
1593  if (buflen < MAXPATHLEN) {
1594    assert(false, "must use a large-enough buffer");
1595    buf[0] = '\0';
1596    return;
1597  }
1598  // Lazy resolve the path to current module.
1599  if (saved_jvm_path[0] != 0) {
1600    strcpy(buf, saved_jvm_path);
1601    return;
1602  }
1603
1604  Dl_info dlinfo;
1605  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1606  assert(ret != 0, "cannot locate libjvm");
1607  char* rp = realpath((char *)dlinfo.dli_fname, buf);
1608  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1609
1610  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1611  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1612}
1613
1614void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1615  // no prefix required, not even "_"
1616}
1617
1618void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1619  // no suffix required
1620}
1621
1622////////////////////////////////////////////////////////////////////////////////
1623// sun.misc.Signal support
1624
1625static volatile jint sigint_count = 0;
1626
1627static void
1628UserHandler(int sig, void *siginfo, void *context) {
1629  // 4511530 - sem_post is serialized and handled by the manager thread. When
1630  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1631  // don't want to flood the manager thread with sem_post requests.
1632  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1633    return;
1634
1635  // Ctrl-C is pressed during error reporting, likely because the error
1636  // handler fails to abort. Let VM die immediately.
1637  if (sig == SIGINT && is_error_reported()) {
1638    os::die();
1639  }
1640
1641  os::signal_notify(sig);
1642}
1643
1644void* os::user_handler() {
1645  return CAST_FROM_FN_PTR(void*, UserHandler);
1646}
1647
1648extern "C" {
1649  typedef void (*sa_handler_t)(int);
1650  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1651}
1652
1653void* os::signal(int signal_number, void* handler) {
1654  struct sigaction sigAct, oldSigAct;
1655
1656  sigfillset(&(sigAct.sa_mask));
1657
1658  // Do not block out synchronous signals in the signal handler.
1659  // Blocking synchronous signals only makes sense if you can really
1660  // be sure that those signals won't happen during signal handling,
1661  // when the blocking applies. Normal signal handlers are lean and
1662  // do not cause signals. But our signal handlers tend to be "risky"
1663  // - secondary SIGSEGVs, SIGILLs and SIGBUSes may and do happen.
1664  // On AIX/PASE there was a case where a SIGSEGV happened, followed
1665  // by a SIGILL, which was blocked due to the signal mask. The process
1666  // just hung forever. Better to crash from a secondary signal than to hang.
1667  sigdelset(&(sigAct.sa_mask), SIGSEGV);
1668  sigdelset(&(sigAct.sa_mask), SIGBUS);
1669  sigdelset(&(sigAct.sa_mask), SIGILL);
1670  sigdelset(&(sigAct.sa_mask), SIGFPE);
1671  sigdelset(&(sigAct.sa_mask), SIGTRAP);
1672
1673  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1674
1675  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1676
1677  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1678    // -1 means registration failed
1679    return (void *)-1;
1680  }
1681
1682  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1683}
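
// A minimal usage sketch for os::signal() above (the handler name is
// hypothetical; this is not code the VM runs): install a handler and keep
// the previous one around.
//
//   static void my_handler(int sig, siginfo_t* info, void* uc) { /* ... */ }
//
//   void* old = os::signal(SIGUSR1, CAST_FROM_FN_PTR(void*, my_handler));
//   if (old == (void*)-1) {
//     // registration failed, see the sigaction() call above
//   }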
1684
1685void os::signal_raise(int signal_number) {
1686  ::raise(signal_number);
1687}
1688
1689//
1690  // The following code was moved from os.cpp to make it
1691  // platform specific, which it is by its very nature.
1692//
1693
1694// Will be modified when max signal is changed to be dynamic
1695int os::sigexitnum_pd() {
1696  return NSIG;
1697}
1698
1699// a counter for each possible signal value
1700static volatile jint pending_signals[NSIG+1] = { 0 };
1701
1702// Wrapper functions for: sem_init(), sem_post(), sem_wait()
1703// On AIX, we use sem_init(), sem_post(), sem_wait()
1704  // On PASE, we need to use msem_lock() and msem_unlock(), because POSIX Semaphores
1705// do not seem to work at all on PASE (unimplemented, will cause SIGILL).
1706// Note that just using msem_.. APIs for both PASE and AIX is not an option either, as
1707// on AIX, msem_..() calls are suspected of causing problems.
1708static sem_t sig_sem;
1709static msemaphore* p_sig_msem = 0;
1710
1711static void local_sem_init() {
1712  if (os::Aix::on_aix()) {
1713    int rc = ::sem_init(&sig_sem, 0, 0);
1714    guarantee(rc != -1, "sem_init failed");
1715  } else {
1716    // Memory semaphores must live in shared mem.
1717    guarantee0(p_sig_msem == NULL);
1718    p_sig_msem = (msemaphore*)os::reserve_memory(sizeof(msemaphore), NULL);
1719    guarantee(p_sig_msem, "Cannot allocate memory for memory semaphore");
1720    guarantee(::msem_init(p_sig_msem, 0) == p_sig_msem, "msem_init failed");
1721  }
1722}
1723
1724static void local_sem_post() {
1725  static bool warn_only_once = false;
1726  if (os::Aix::on_aix()) {
1727    int rc = ::sem_post(&sig_sem);
1728    if (rc == -1 && !warn_only_once) {
1729      trcVerbose("sem_post failed (errno = %d, %s)", errno, os::errno_name(errno));
1730      warn_only_once = true;
1731    }
1732  } else {
1733    guarantee0(p_sig_msem != NULL);
1734    int rc = ::msem_unlock(p_sig_msem, 0);
1735    if (rc == -1 && !warn_only_once) {
1736      trcVerbose("msem_unlock failed (errno = %d, %s)", errno, os::errno_name(errno));
1737      warn_only_once = true;
1738    }
1739  }
1740}
1741
1742static void local_sem_wait() {
1743  static bool warn_only_once = false;
1744  if (os::Aix::on_aix()) {
1745    int rc = ::sem_wait(&sig_sem);
1746    if (rc == -1 && !warn_only_once) {
1747      trcVerbose("sem_wait failed (errno = %d, %s)", errno, os::errno_name(errno));
1748      warn_only_once = true;
1749    }
1750  } else {
1751    guarantee0(p_sig_msem != NULL); // must init before use
1752    int rc = ::msem_lock(p_sig_msem, 0);
1753    if (rc == -1 && !warn_only_once) {
1754      trcVerbose("msem_lock failed (errno = %d, %s)", errno, os::errno_name(errno));
1755      warn_only_once = true;
1756    }
1757  }
1758}
1759
1760void os::signal_init_pd() {
1761  // Initialize signal structures
1762  ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1763
1764  // Initialize signal semaphore
1765  local_sem_init();
1766}
1767
1768void os::signal_notify(int sig) {
1769  Atomic::inc(&pending_signals[sig]);
1770  local_sem_post();
1771}
1772
1773static int check_pending_signals(bool wait) {
1774  Atomic::store(0, &sigint_count);
1775  for (;;) {
1776    for (int i = 0; i < NSIG + 1; i++) {
1777      jint n = pending_signals[i];
1778      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1779        return i;
1780      }
1781    }
1782    if (!wait) {
1783      return -1;
1784    }
1785    JavaThread *thread = JavaThread::current();
1786    ThreadBlockInVM tbivm(thread);
1787
1788    bool threadIsSuspended;
1789    do {
1790      thread->set_suspend_equivalent();
1791      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1792
1793      local_sem_wait();
1794
1795      // were we externally suspended while we were waiting?
1796      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1797      if (threadIsSuspended) {
1798        //
1799        // The semaphore has been incremented, but while we were waiting
1800        // another thread suspended us. We don't want to continue running
1801        // while suspended because that would surprise the thread that
1802        // suspended us.
1803        //
1804
1805        local_sem_post();
1806
1807        thread->java_suspend_self();
1808      }
1809    } while (threadIsSuspended);
1810  }
1811}
1812
1813int os::signal_lookup() {
1814  return check_pending_signals(false);
1815}
1816
1817int os::signal_wait() {
1818  return check_pending_signals(true);
1819}
1820
1821////////////////////////////////////////////////////////////////////////////////
1822// Virtual Memory
1823
1824// We need to keep small simple bookkeeping for os::reserve_memory and friends.
1825
1826#define VMEM_MAPPED  1
1827#define VMEM_SHMATED 2
1828
1829struct vmembk_t {
1830  int type;         // 1 - mmap, 2 - shmat
1831  char* addr;
1832  size_t size;      // Real size, may be larger than usersize.
1833  size_t pagesize;  // page size of area
1834  vmembk_t* next;
1835
1836  bool contains_addr(char* p) const {
1837    return p >= addr && p < (addr + size);
1838  }
1839
1840  bool contains_range(char* p, size_t s) const {
1841    return contains_addr(p) && contains_addr(p + s - 1);
1842  }
1843
1844  void print_on(outputStream* os) const {
1845    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1846      " bytes, %d %s pages), %s",
1847      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1848      (type == VMEM_SHMATED ? "shmat" : "mmap")
1849    );
1850  }
1851
1852  // Check that range is a sub range of memory block (or equal to memory block);
1853  // also check that range is fully page aligned to the page size of the block.
1854  void assert_is_valid_subrange(char* p, size_t s) const {
1855    if (!contains_range(p, s)) {
1856      trcVerbose("[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1857              "range of [" PTR_FORMAT " - " PTR_FORMAT "].",
1858              p, p + s, addr, addr + size);
1859      guarantee0(false);
1860    }
1861    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1862      trcVerbose("range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1863              " aligned to pagesize (%lu)", p, p + s, (unsigned long) pagesize);
1864      guarantee0(false);
1865    }
1866  }
1867};
1868
1869static struct {
1870  vmembk_t* first;
1871  MiscUtils::CritSect cs;
1872} vmem;
1873
1874static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1875  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1876  assert0(p);
1877  if (p) {
1878    MiscUtils::AutoCritSect lck(&vmem.cs);
1879    p->addr = addr; p->size = size;
1880    p->pagesize = pagesize;
1881    p->type = type;
1882    p->next = vmem.first;
1883    vmem.first = p;
1884  }
1885}
1886
1887static vmembk_t* vmembk_find(char* addr) {
1888  MiscUtils::AutoCritSect lck(&vmem.cs);
1889  for (vmembk_t* p = vmem.first; p; p = p->next) {
1890    if (p->addr <= addr && (p->addr + p->size) > addr) {
1891      return p;
1892    }
1893  }
1894  return NULL;
1895}
1896
1897static void vmembk_remove(vmembk_t* p0) {
1898  MiscUtils::AutoCritSect lck(&vmem.cs);
1899  assert0(p0);
1900  assert0(vmem.first); // List should not be empty.
1901  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1902    if (*pp == p0) {
1903      *pp = p0->next;
1904      ::free(p0);
1905      return;
1906    }
1907  }
1908  assert0(false); // Not found?
1909}
1910
1911static void vmembk_print_on(outputStream* os) {
1912  MiscUtils::AutoCritSect lck(&vmem.cs);
1913  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1914    vmi->print_on(os);
1915    os->cr();
1916  }
1917}
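
// Lifecycle sketch for the bookkeeping above (illustrative only): every
// reservation adds one block, lookups find it by any address inside it,
// and releasing the whole block removes it again.
//
//   char* p = reserve_mmaped_memory(SIZE_64K, NULL, 0); // calls vmembk_add()
//   vmembk_t* vmi = vmembk_find(p);                     // finds that block
//   vmi->assert_is_valid_subrange(p, SIZE_4K);          // sub-range check
//   os::pd_release_memory(p, SIZE_64K);                 // ends in vmembk_remove()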
1918
1919// Reserve and attach a section of System V memory.
1920// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1921// address. Failing that, it will attach the memory anywhere.
1922// If <requested_addr> is NULL, function will attach the memory anywhere.
1923//
1924// <alignment_hint> is being ignored by this function. It is very probable however that the
1925// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1926  // Should this not be enough, we can put more work into it.
1927static char* reserve_shmated_memory (
1928  size_t bytes,
1929  char* requested_addr,
1930  size_t alignment_hint) {
1931
1932  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1933    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1934    bytes, requested_addr, alignment_hint);
1935
1936  // Either give me a wish address or a wish alignment but not both.
1937  assert0(!(requested_addr != NULL && alignment_hint != 0));
1938
1939  // We must prevent anyone from attaching too close to the
1940  // BRK because that may cause malloc OOM.
1941  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1942    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1943      "Will attach anywhere.", requested_addr);
1944    // Act like the OS refused to attach there.
1945    requested_addr = NULL;
1946  }
1947
1948  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
1949  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
1950  if (os::Aix::on_pase_V5R4_or_older()) {
1951    ShouldNotReachHere();
1952  }
1953
1954  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
1955  const size_t size = align_size_up(bytes, SIZE_64K);
1956
1957  // Reserve the shared segment.
1958  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
1959  if (shmid == -1) {
1960    trcVerbose("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
1961    return NULL;
1962  }
1963
1964  // Important note:
1965  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
1966  // We must right after attaching it remove it from the system. System V shm segments are global and
1967  // survive the process.
1968  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
1969
1970  struct shmid_ds shmbuf;
1971  memset(&shmbuf, 0, sizeof(shmbuf));
1972  shmbuf.shm_pagesize = SIZE_64K;
1973  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
1974    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
1975               size / SIZE_64K, errno);
1976    // I want to know if this ever happens.
1977    assert(false, "failed to set page size for shmat");
1978  }
1979
1980  // Now attach the shared segment.
1981  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
1982  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
1983  // were not a segment boundary.
1984  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
1985  const int errno_shmat = errno;
1986
1987  // (A) Right after shmat and before handing shmat errors delete the shm segment.
1988  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
1989    trcVerbose("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
1990    assert(false, "failed to remove shared memory segment!");
1991  }
1992
1993  // Handle shmat error. If we failed to attach, just return.
1994  if (addr == (char*)-1) {
1995    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
1996    return NULL;
1997  }
1998
1999  // Just for info: query the real page size. In case setting the page size did not
2000  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2001  const size_t real_pagesize = os::Aix::query_pagesize(addr);
2002  if (real_pagesize != shmbuf.shm_pagesize) {
2003    trcVerbose("pagesize is, surprisingly, %h.", real_pagesize);
2004  }
2005
2006  if (addr) {
2007    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2008      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2009  } else {
2010    if (requested_addr != NULL) {
2011      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at with address " PTR_FORMAT ".", size, requested_addr);
2012    } else {
2013      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2014    }
2015  }
2016
2017  // book-keeping
2018  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2019  assert0(is_aligned_to(addr, os::vm_page_size()));
2020
2021  return addr;
2022}
2023
2024static bool release_shmated_memory(char* addr, size_t size) {
2025
2026  trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2027    addr, addr + size - 1);
2028
2029  bool rc = false;
2030
2031  // TODO: is there a way to verify shm size without doing bookkeeping?
2032  if (::shmdt(addr) != 0) {
2033    trcVerbose("error (%d).", errno);
2034  } else {
2035    trcVerbose("ok.");
2036    rc = true;
2037  }
2038  return rc;
2039}
2040
2041static bool uncommit_shmated_memory(char* addr, size_t size) {
2042  trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2043    addr, addr + size - 1);
2044
2045  const bool rc = my_disclaim64(addr, size);
2046
2047  if (!rc) {
2048    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2049    return false;
2050  }
2051  return true;
2052}
2053
2054////////////////////////////////  mmap-based routines /////////////////////////////////
2055
2056// Reserve memory via mmap.
2057// If <requested_addr> is given, an attempt is made to attach at the given address.
2058// Failing that, memory is allocated at any address.
2059// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2060// allocate at an address aligned with the given alignment. Failing that, memory
2061// is aligned anywhere.
2062static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2063  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2064    "alignment_hint " UINTX_FORMAT "...",
2065    bytes, requested_addr, alignment_hint);
2066
2067  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2068  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2069    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2070    return NULL;
2071  }
2072
2073  // We must prevent anyone from attaching too close to the
2074  // BRK because that may cause malloc OOM.
2075  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2076    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2077      "Will attach anywhere.", requested_addr);
2078    // Act like the OS refused to attach there.
2079    requested_addr = NULL;
2080  }
2081
2082  // Specify one or the other but not both.
2083  assert0(!(requested_addr != NULL && alignment_hint > 0));
2084
2085  // In 64K mode, we claim the global page size (os::vm_page_size())
2086  // is 64K. This is one of the few points where that illusion may
2087  // break, because mmap() will always return memory aligned to 4K. So
2088  // we must ensure we only ever return memory aligned to 64k.
2089  if (alignment_hint) {
2090    alignment_hint = lcm(alignment_hint, os::vm_page_size());
2091  } else {
2092    alignment_hint = os::vm_page_size();
2093  }
2094
2095  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2096  const size_t size = align_size_up(bytes, os::vm_page_size());
2097
2098  // alignment: Allocate memory large enough to include an aligned range of the right size and
2099  // cut off the leading and trailing waste pages.
2100  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2101  const size_t extra_size = size + alignment_hint;
2102
2103  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2104  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2105  int flags = MAP_ANONYMOUS | MAP_SHARED;
2106
2107  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2108  // it means if wishaddress is given but MAP_FIXED is not set.
2109  //
2110  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2111  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2112  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2113  // get clobbered.
2114  if (requested_addr != NULL) {
2115    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2116      flags |= MAP_FIXED;
2117    }
2118  }
2119
2120  char* addr = (char*)::mmap(requested_addr, extra_size,
2121      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2122
2123  if (addr == MAP_FAILED) {
2124    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, size, errno);
2125    return NULL;
2126  }
2127
2128  // Handle alignment.
2129  char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2130  const size_t waste_pre = addr_aligned - addr;
2131  char* const addr_aligned_end = addr_aligned + size;
2132  const size_t waste_post = extra_size - waste_pre - size;
2133  if (waste_pre > 0) {
2134    ::munmap(addr, waste_pre);
2135  }
2136  if (waste_post > 0) {
2137    ::munmap(addr_aligned_end, waste_post);
2138  }
2139  addr = addr_aligned;
2140
2141  if (addr) {
2142    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2143      addr, addr + bytes, bytes);
2144  } else {
2145    if (requested_addr != NULL) {
2146      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2147    } else {
2148      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2149    }
2150  }
2151
2152  // bookkeeping
2153  vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2154
2155  // Test alignment, see above.
2156  assert0(is_aligned_to(addr, os::vm_page_size()));
2157
2158  return addr;
2159}
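
// Worked example for the alignment trimming above (numbers illustrative):
// bytes = 64K and alignment_hint = 256K give extra_size = 320K. If mmap
// returns an address that is 64K past a 256K boundary, then waste_pre = 192K
// and waste_post = 320K - 192K - 64K = 64K; both waste ranges are unmapped
// and the aligned 64K range in the middle survives.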
2160
2161static bool release_mmaped_memory(char* addr, size_t size) {
2162  assert0(is_aligned_to(addr, os::vm_page_size()));
2163  assert0(is_aligned_to(size, os::vm_page_size()));
2164
2165  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2166    addr, addr + size - 1);
2167  bool rc = false;
2168
2169  if (::munmap(addr, size) != 0) {
2170    trcVerbose("failed (%d)\n", errno);
2171    rc = false;
2172  } else {
2173    trcVerbose("ok.");
2174    rc = true;
2175  }
2176
2177  return rc;
2178}
2179
2180static bool uncommit_mmaped_memory(char* addr, size_t size) {
2181
2182  assert0(is_aligned_to(addr, os::vm_page_size()));
2183  assert0(is_aligned_to(size, os::vm_page_size()));
2184
2185  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2186    addr, addr + size - 1);
2187  bool rc = false;
2188
2189  // Uncommit mmap memory with msync MS_INVALIDATE.
2190  if (::msync(addr, size, MS_INVALIDATE) != 0) {
2191    trcVerbose("failed (%d)\n", errno);
2192    rc = false;
2193  } else {
2194    trcVerbose("ok.");
2195    rc = true;
2196  }
2197
2198  return rc;
2199}
2200
2201int os::vm_page_size() {
2202  // Seems redundant as all get out.
2203  assert(os::Aix::page_size() != -1, "must call os::init");
2204  return os::Aix::page_size();
2205}
2206
2207// Aix allocates memory by pages.
2208int os::vm_allocation_granularity() {
2209  assert(os::Aix::page_size() != -1, "must call os::init");
2210  return os::Aix::page_size();
2211}
2212
2213#ifdef PRODUCT
2214static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2215                                    int err) {
2216  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2217          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2218          os::errno_name(err), err);
2219}
2220#endif
2221
2222void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2223                                  const char* mesg) {
2224  assert(mesg != NULL, "mesg must be specified");
2225  if (!pd_commit_memory(addr, size, exec)) {
2226    // Add extra info in product mode for vm_exit_out_of_memory():
2227    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2228    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
2229  }
2230}
2231
2232bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2233
2234  assert(is_aligned_to(addr, os::vm_page_size()),
2235    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2236    p2i(addr), os::vm_page_size());
2237  assert(is_aligned_to(size, os::vm_page_size()),
2238    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2239    size, os::vm_page_size());
2240
2241  vmembk_t* const vmi = vmembk_find(addr);
2242  guarantee0(vmi);
2243  vmi->assert_is_valid_subrange(addr, size);
2244
2245  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2246
2247  if (UseExplicitCommit) {
2248    // AIX commits memory on touch. So, touch all pages to be committed.
2249    for (char* p = addr; p < (addr + size); p += SIZE_4K) {
2250      *p = '\0';
2251    }
2252  }
2253
2254  return true;
2255}
2256
2257bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2258  return pd_commit_memory(addr, size, exec);
2259}
2260
2261void os::pd_commit_memory_or_exit(char* addr, size_t size,
2262                                  size_t alignment_hint, bool exec,
2263                                  const char* mesg) {
2264  // Alignment_hint is ignored on this OS.
2265  pd_commit_memory_or_exit(addr, size, exec, mesg);
2266}
2267
2268bool os::pd_uncommit_memory(char* addr, size_t size) {
2269  assert(is_aligned_to(addr, os::vm_page_size()),
2270    "addr " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2271    p2i(addr), os::vm_page_size());
2272  assert(is_aligned_to(size, os::vm_page_size()),
2273    "size " PTR_FORMAT " not aligned to vm_page_size (" PTR_FORMAT ")",
2274    size, os::vm_page_size());
2275
2276  // Dynamically do different things for mmap/shmat.
2277  const vmembk_t* const vmi = vmembk_find(addr);
2278  guarantee0(vmi);
2279  vmi->assert_is_valid_subrange(addr, size);
2280
2281  if (vmi->type == VMEM_SHMATED) {
2282    return uncommit_shmated_memory(addr, size);
2283  } else {
2284    return uncommit_mmaped_memory(addr, size);
2285  }
2286}
2287
2288bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2289  // Do not call this; no need to commit stack pages on AIX.
2290  ShouldNotReachHere();
2291  return true;
2292}
2293
2294bool os::remove_stack_guard_pages(char* addr, size_t size) {
2295  // Do not call this; no need to commit stack pages on AIX.
2296  ShouldNotReachHere();
2297  return true;
2298}
2299
2300void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2301}
2302
2303void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2304}
2305
2306void os::numa_make_global(char *addr, size_t bytes) {
2307}
2308
2309void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2310}
2311
2312bool os::numa_topology_changed() {
2313  return false;
2314}
2315
2316size_t os::numa_get_groups_num() {
2317  return 1;
2318}
2319
2320int os::numa_get_group_id() {
2321  return 0;
2322}
2323
2324size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2325  if (size > 0) {
2326    ids[0] = 0;
2327    return 1;
2328  }
2329  return 0;
2330}
2331
2332bool os::get_page_info(char *start, page_info* info) {
2333  return false;
2334}
2335
2336char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2337  return end;
2338}
2339
2340// Reserves and attaches a shared memory segment.
2341// Will assert if a wish address is given and could not be obtained.
2342char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2343
2344  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2345  // thereby clobbering old mappings at that place. That is probably
2346  // not intended, never used and almost certainly an error were it
2347  // ever be used this way (to try attaching at a specified address
2348  // without clobbering old mappings an alternate API exists,
2349  // os::attempt_reserve_memory_at()).
2350  // Instead of mimicking the dangerous coding of the other platforms, here I
2351  // just ignore the request address (release) or assert(debug).
2352  assert0(requested_addr == NULL);
2353
2354  // Always round to os::vm_page_size(), which may be larger than 4K.
2355  bytes = align_size_up(bytes, os::vm_page_size());
2356  const size_t alignment_hint0 =
2357    alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2358
2359  // In 4K mode always use mmap.
2360  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2361  if (os::vm_page_size() == SIZE_4K) {
2362    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2363  } else {
2364    if (bytes >= Use64KPagesThreshold) {
2365      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2366    } else {
2367      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2368    }
2369  }
2370}
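
// Note: the mmap/shmat decision above is controlled by Use64KPagesThreshold.
// Assuming the usual -XX flag syntax, e.g.
//
//   java -XX:Use64KPagesThreshold=4194304 ...
//
// would route only reservations of 4M and larger through shmat (sketch; the
// default value lives in the AIX-specific flag definitions).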
2371
2372bool os::pd_release_memory(char* addr, size_t size) {
2373
2374  // Dynamically do different things for mmap/shmat.
2375  vmembk_t* const vmi = vmembk_find(addr);
2376  guarantee0(vmi);
2377
2378  // Always round to os::vm_page_size(), which may be larger than 4K.
2379  size = align_size_up(size, os::vm_page_size());
2380  addr = (char *)align_ptr_up(addr, os::vm_page_size());
2381
2382  bool rc = false;
2383  bool remove_bookkeeping = false;
2384  if (vmi->type == VMEM_SHMATED) {
2385    // For shmatted memory, we do:
2386    // - If user wants to release the whole range, release the memory (shmdt).
2387    // - If user only wants to release a partial range, uncommit (disclaim) that
2388    //   range. That way, at least, we do not use memory anymore (bust still page
2389    //   table space).
2390    vmi->assert_is_valid_subrange(addr, size);
2391    if (addr == vmi->addr && size == vmi->size) {
2392      rc = release_shmated_memory(addr, size);
2393      remove_bookkeeping = true;
2394    } else {
2395      rc = uncommit_shmated_memory(addr, size);
2396    }
2397  } else {
2398    // User may unmap partial regions but region has to be fully contained.
2399#ifdef ASSERT
2400    vmi->assert_is_valid_subrange(addr, size);
2401#endif
2402    rc = release_mmaped_memory(addr, size);
2403    remove_bookkeeping = true;
2404  }
2405
2406  // update bookkeeping
2407  if (rc && remove_bookkeeping) {
2408    vmembk_remove(vmi);
2409  }
2410
2411  return rc;
2412}
2413
2414static bool checked_mprotect(char* addr, size_t size, int prot) {
2415
2416  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2417  // not tell me if protection failed when trying to protect an un-protectable range.
2418  //
2419  // This means if the memory was allocated using shmget/shmat, protection won't work
2420  // but mprotect will still return 0:
2421  //
2422  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2423
2424  bool rc = (::mprotect(addr, size, prot) == 0);
2425
2426  if (!rc) {
2427    const char* const s_errno = os::errno_name(errno);
2428    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2429    return false;
2430  }
2431
2432  // mprotect success check
2433  //
2434  // Mprotect said it changed the protection but can I believe it?
2435  //
2436  // To be sure I need to check the protection afterwards. Try to
2437  // read from protected memory and check whether that causes a segfault.
2438  //
2439  if (!os::Aix::xpg_sus_mode()) {
2440
2441    if (CanUseSafeFetch32()) {
2442
2443      const bool read_protected =
2444        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2445         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2446
2447      if (prot & PROT_READ) {
2448        rc = !read_protected;
2449      } else {
2450        rc = read_protected;
2451      }
2452
2453      if (!rc) {
2454        if (os::Aix::on_pase()) {
2455          // There is an issue on older PASE systems where mprotect() will return success but the
2456          // memory will not be protected.
2457          // This has nothing to do with the problem of using mprotect() on SPEC1170 incompatible
2458          // machines; we only see it rarely, when using mprotect() to protect the guard page of
2459          // a stack. It is an OS error.
2460          //
2461          // A valid strategy is just to try again. This usually works. :-/
2462
2463          ::usleep(1000);
2464          if (::mprotect(addr, size, prot) == 0) {
2465            const bool read_protected_2 =
2466              (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2467              SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2468            rc = (prot & PROT_READ) ? !read_protected_2 : read_protected_2;
2469          }
2470        }
2471      }
2472    }
2473  }
2474
2475  assert(rc == true, "mprotect failed.");
2476
2477  return rc;
2478}
2479
2480// Set protections specified
2481bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2482  unsigned int p = 0;
2483  switch (prot) {
2484  case MEM_PROT_NONE: p = PROT_NONE; break;
2485  case MEM_PROT_READ: p = PROT_READ; break;
2486  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2487  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2488  default:
2489    ShouldNotReachHere();
2490  }
2491  // is_committed is unused.
2492  return checked_mprotect(addr, size, p);
2493}
2494
2495bool os::guard_memory(char* addr, size_t size) {
2496  return checked_mprotect(addr, size, PROT_NONE);
2497}
2498
2499bool os::unguard_memory(char* addr, size_t size) {
2500  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2501}
2502
2503// Large page support
2504
2505static size_t _large_page_size = 0;
2506
2507// Enable large page support if OS allows that.
2508void os::large_page_init() {
2509  return; // Nothing to do. See query_multipage_support and friends.
2510}
2511
2512char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2513  // reserve_memory_special() is used to allocate large paged memory. On AIX, we implement
2514  // 64k paged memory reservation using the normal memory allocation paths (os::reserve_memory()),
2515  // so this is not needed.
2516  assert(false, "should not be called on AIX");
2517  return NULL;
2518}
2519
2520bool os::release_memory_special(char* base, size_t bytes) {
2521  // Detaching the SHM segment will also delete it, see reserve_memory_special().
2522  Unimplemented();
2523  return false;
2524}
2525
2526size_t os::large_page_size() {
2527  return _large_page_size;
2528}
2529
2530bool os::can_commit_large_page_memory() {
2531  // Does not matter, we do not support huge pages.
2532  return false;
2533}
2534
2535bool os::can_execute_large_page_memory() {
2536  // Does not matter, we do not support huge pages.
2537  return false;
2538}
2539
2540// Reserve memory at an arbitrary address, only if that area is
2541// available (and not reserved for something else).
2542char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2544
2545  // Always round to os::vm_page_size(), which may be larger than 4K.
2546  bytes = align_size_up(bytes, os::vm_page_size());
2547
2548  // In 4K mode always use mmap.
2549  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2550  if (os::vm_page_size() == SIZE_4K) {
2551    return reserve_mmaped_memory(bytes, requested_addr, 0);
2552  } else {
2553    if (bytes >= Use64KPagesThreshold) {
2554      return reserve_shmated_memory(bytes, requested_addr, 0);
2555    } else {
2556      return reserve_mmaped_memory(bytes, requested_addr, 0);
2557    }
2558  }
2559
2561}
2562
2563size_t os::read(int fd, void *buf, unsigned int nBytes) {
2564  return ::read(fd, buf, nBytes);
2565}
2566
2567size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2568  return ::pread(fd, buf, nBytes, offset);
2569}
2570
2571void os::naked_short_sleep(jlong ms) {
2572  struct timespec req;
2573
2574  assert(ms < 1000, "Un-interruptible sleep, short time use only");
2575  req.tv_sec = 0;
2576  if (ms > 0) {
2577    req.tv_nsec = (ms % 1000) * 1000000;
2578  }
2579  else {
2580    req.tv_nsec = 1;
2581  }
2582
2583  nanosleep(&req, NULL);
2584
2585  return;
2586}
2587
2588// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2589void os::infinite_sleep() {
2590  while (true) {    // sleep forever ...
2591    ::sleep(100);   // ... 100 seconds at a time
2592  }
2593}
2594
2595// Used to convert frequent JVM_Yield() to nops
2596bool os::dont_yield() {
2597  return DontYieldALot;
2598}
2599
2600void os::naked_yield() {
2601  sched_yield();
2602}
2603
2604////////////////////////////////////////////////////////////////////////////////
2605// thread priority support
2606
2607// From AIX manpage to pthread_setschedparam
2608// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2609//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2610//
2611// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2612// range from 40 to 80, where 40 is the least favored priority and 80
2613// is the most favored."
2614//
2615// (Actually, I doubt this even has an impact on AIX, as we do kernel
2616// scheduling there; however, this still leaves iSeries.)
2617//
2618// We use the same values for AIX and PASE.
2619int os::java_to_os_priority[CriticalPriority + 1] = {
2620  54,             // 0 Entry should never be used
2621
2622  55,             // 1 MinPriority
2623  55,             // 2
2624  56,             // 3
2625
2626  56,             // 4
2627  57,             // 5 NormPriority
2628  57,             // 6
2629
2630  58,             // 7
2631  58,             // 8
2632  59,             // 9 NearMaxPriority
2633
2634  60,             // 10 MaxPriority
2635
2636  60              // 11 CriticalPriority
2637};
2638
2639OSReturn os::set_native_priority(Thread* thread, int newpri) {
2640  if (!UseThreadPriorities) return OS_OK;
2641  pthread_t thr = thread->osthread()->pthread_id();
2642  int policy = SCHED_OTHER;
2643  struct sched_param param;
2644  param.sched_priority = newpri;
2645  int ret = pthread_setschedparam(thr, policy, &param);
2646
2647  if (ret != 0) {
2648    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2649        (int)thr, newpri, ret, os::errno_name(ret));
2650  }
2651  return (ret == 0) ? OS_OK : OS_ERR;
2652}
2653
2654OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2655  if (!UseThreadPriorities) {
2656    *priority_ptr = java_to_os_priority[NormPriority];
2657    return OS_OK;
2658  }
2659  pthread_t thr = thread->osthread()->pthread_id();
2660  int policy = SCHED_OTHER;
2661  struct sched_param param;
2662  int ret = pthread_getschedparam(thr, &policy, &param);
2663  *priority_ptr = param.sched_priority;
2664
2665  return (ret == 0) ? OS_OK : OS_ERR;
2666}
2667
2668// Hint to the underlying OS that a task switch would not be good.
2669// Void return because it's a hint and can fail.
2670void os::hint_no_preempt() {}
2671
2672////////////////////////////////////////////////////////////////////////////////
2673// suspend/resume support
2674
2675//  the low-level signal-based suspend/resume support is a remnant from the
2676//  old VM-suspension that used to be for java-suspension, safepoints etc,
2677//  within hotspot. Now there is a single use-case for this:
2678//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2679//      that runs in the watcher thread.
2680//  The remaining code is greatly simplified from the more general suspension
2681//  code that used to be used.
2682//
2683//  The protocol is quite simple:
2684//  - suspend:
2685//      - sends a signal to the target thread
2686//      - polls the suspend state of the osthread using a yield loop
2687//      - target thread signal handler (SR_handler) sets suspend state
2688//        and blocks in sigsuspend until continued
2689//  - resume:
2690//      - sets target osthread state to continue
2691//      - sends signal to end the sigsuspend loop in the SR_handler
2692//
2693//  Note that the SR_lock plays no role in this suspend/resume protocol.
2694//
2695
2696static void resume_clear_context(OSThread *osthread) {
2697  osthread->set_ucontext(NULL);
2698  osthread->set_siginfo(NULL);
2699}
2700
2701static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2702  osthread->set_ucontext(context);
2703  osthread->set_siginfo(siginfo);
2704}
2705
2706//
2707// Handler function invoked when a thread's execution is suspended or
2708// resumed. We have to be careful that only async-safe functions are
2709// called here (Note: most pthread functions are not async safe and
2710// should be avoided.)
2711//
2712// Note: sigwait() is a more natural fit than sigsuspend() from an
2713  // interface point of view, but sigwait() prevents the signal handler
2714  // from being run. libpthread would get very confused by not having
2715  // its signal handlers run and prevents sigwait()'s use with the
2716  // mutex-granting signal.
2717//
2718// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2719//
2720static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2721  // Save and restore errno to avoid confusing native code with EINTR
2722  // after sigsuspend.
2723  int old_errno = errno;
2724
2725  Thread* thread = Thread::current();
2726  OSThread* osthread = thread->osthread();
2727  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2728
2729  os::SuspendResume::State current = osthread->sr.state();
2730  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2731    suspend_save_context(osthread, siginfo, context);
2732
2733    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2734    os::SuspendResume::State state = osthread->sr.suspended();
2735    if (state == os::SuspendResume::SR_SUSPENDED) {
2736      sigset_t suspend_set;  // signals for sigsuspend()
2737
2738      // get current set of blocked signals and unblock resume signal
2739      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2740      sigdelset(&suspend_set, SR_signum);
2741
2742      // wait here until we are resumed
2743      while (1) {
2744        sigsuspend(&suspend_set);
2745
2746        os::SuspendResume::State result = osthread->sr.running();
2747        if (result == os::SuspendResume::SR_RUNNING) {
2748          break;
2749        }
2750      }
2751
2752    } else if (state == os::SuspendResume::SR_RUNNING) {
2753      // request was cancelled, continue
2754    } else {
2755      ShouldNotReachHere();
2756    }
2757
2758    resume_clear_context(osthread);
2759  } else if (current == os::SuspendResume::SR_RUNNING) {
2760    // request was cancelled, continue
2761  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2762    // ignore
2763  } else {
2764    ShouldNotReachHere();
2765  }
2766
2767  errno = old_errno;
2768}
2769
2770static int SR_initialize() {
2771  struct sigaction act;
2772  char *s;
2773  // Get signal number to use for suspend/resume
2774  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2775    int sig = ::strtol(s, 0, 10);
2776    if (sig > MAX2(SIGSEGV, SIGBUS) &&  // See 4355769.
2777        sig < NSIG) {                   // Must be legal signal and fit into sigflags[].
2778      SR_signum = sig;
2779    } else {
2780      warning("You set _JAVA_SR_SIGNUM=%d. It must be in range [%d, %d]. Using %d instead.",
2781              sig, MAX2(SIGSEGV, SIGBUS)+1, NSIG-1, SR_signum);
2782    }
2783  }
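  // Example (shell usage; the value 40 is illustrative): to move the
  // suspend/resume signal away from one that a native library claims,
  // the VM could be started with
  //
  //   _JAVA_SR_SIGNUM=40 java ...
  //
  // subject to the range check above (> max(SIGSEGV, SIGBUS), < NSIG).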
2784
2785  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2786        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2787
2788  sigemptyset(&SR_sigset);
2789  sigaddset(&SR_sigset, SR_signum);
2790
2791  // Set up signal handler for suspend/resume.
2792  act.sa_flags = SA_RESTART|SA_SIGINFO;
2793  act.sa_handler = (void (*)(int)) SR_handler;
2794
2795  // SR_signum is blocked by default.
2796  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2797
2798  if (sigaction(SR_signum, &act, 0) == -1) {
2799    return -1;
2800  }
2801
2802  // Save signal flag
2803  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2804  return 0;
2805}
2806
2807static int SR_finalize() {
2808  return 0;
2809}
2810
2811static int sr_notify(OSThread* osthread) {
2812  int status = pthread_kill(osthread->pthread_id(), SR_signum);
2813  assert_status(status == 0, status, "pthread_kill");
2814  return status;
2815}
2816
2817// "Randomly" selected value for how long we want to spin
2818// before bailing out on suspending a thread, also how often
2819// we send a signal to a thread we want to resume
2820static const int RANDOMLY_LARGE_INTEGER = 1000000;
2821static const int RANDOMLY_LARGE_INTEGER2 = 100;
2822
2823// returns true on success and false on error - really an error is fatal
2824// but this seems the normal response to library errors
2825static bool do_suspend(OSThread* osthread) {
2826  assert(osthread->sr.is_running(), "thread should be running");
2827  // mark as suspended and send signal
2828
2829  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2830    // failed to switch, state wasn't running?
2831    ShouldNotReachHere();
2832    return false;
2833  }
2834
2835  if (sr_notify(osthread) != 0) {
2836    // try to cancel, switch to running
2837
2838    os::SuspendResume::State result = osthread->sr.cancel_suspend();
2839    if (result == os::SuspendResume::SR_RUNNING) {
2840      // cancelled
2841      return false;
2842    } else if (result == os::SuspendResume::SR_SUSPENDED) {
2843      // somehow managed to suspend
2844      return true;
2845    } else {
2846      ShouldNotReachHere();
2847      return false;
2848    }
2849  }
2850
2851  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2852
2853  for (int n = 0; !osthread->sr.is_suspended(); n++) {
2854    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2855      os::naked_yield();
2856    }
2857
2858    // timeout, try to cancel the request
2859    if (n >= RANDOMLY_LARGE_INTEGER) {
2860      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2861      if (cancelled == os::SuspendResume::SR_RUNNING) {
2862        return false;
2863      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2864        return true;
2865      } else {
2866        ShouldNotReachHere();
2867        return false;
2868      }
2869    }
2870  }
2871
2872  guarantee(osthread->sr.is_suspended(), "Must be suspended");
2873  return true;
2874}
2875
2876static void do_resume(OSThread* osthread) {
2877  //assert(osthread->sr.is_suspended(), "thread should be suspended");
2878
2879  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2880    // failed to switch to WAKEUP_REQUEST
2881    ShouldNotReachHere();
2882    return;
2883  }
2884
2885  while (!osthread->sr.is_running()) {
2886    if (sr_notify(osthread) == 0) {
2887      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2888        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2889          os::naked_yield();
2890        }
2891      }
2892    } else {
2893      ShouldNotReachHere();
2894    }
2895  }
2896
2897  guarantee(osthread->sr.is_running(), "Must be running!");
2898}
2899
2900///////////////////////////////////////////////////////////////////////////////////
2901// signal handling (except suspend/resume)
2902
2903// This routine may be used by user applications as a "hook" to catch signals.
2904// The user-defined signal handler must pass unrecognized signals to this
2905// routine, and if it returns true (non-zero), then the signal handler must
2906// return immediately. If the flag "abort_if_unrecognized" is true, then this
2907  // routine will never return false (zero), but instead will execute a VM panic
2908  // routine to kill the process.
2909//
2910// If this routine returns false, it is OK to call it again. This allows
2911// the user-defined signal handler to perform checks either before or after
2912// the VM performs its own checks. Naturally, the user code would be making
2913// a serious error if it tried to handle an exception (such as a null check
2914// or breakpoint) that the VM was generating for its own correct operation.
2915//
2916// This routine may recognize any of the following kinds of signals:
2917//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2918// It should be consulted by handlers for any of those signals.
2919//
2920// The caller of this routine must pass in the three arguments supplied
2921// to the function referred to in the "sa_sigaction" (not the "sa_handler")
2922// field of the structure passed to sigaction(). This routine assumes that
2923// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2924//
2925// Note that the VM will print warnings if it detects conflicting signal
2926// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2927//
2928extern "C" JNIEXPORT int
2929JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
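
// A minimal sketch of the "hook" usage described above (the handler name is
// hypothetical; real applications would also run with
// -XX:+AllowUserSignalHandlers or chain via libjsig):
//
//   static void app_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_aix_signal(sig, info, uc, 0 /* !abort_if_unrecognized */)) {
//       return;  // the VM recognized and handled the signal
//     }
//     // ... application-specific handling of unrecognized signals ...
//   }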
2930
2931// Set thread signal mask (for some reason on AIX sigthreadmask() seems
2932// to be the thing to call; documentation is not terribly clear about whether
2933  // pthread_sigmask also works, and if it does, whether it does the same).
2934bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2935  const int rc = ::pthread_sigmask(how, set, oset);
2936  // return value semantics differ slightly for error case:
2937  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
2938  // (so, pthread_sigmask is more threadsafe for error handling)
2939  // But success is always 0.
2940  return rc == 0;
2941}
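
// Usage sketch (illustrative): temporarily block SIGTRAP in the calling
// thread and restore the previous mask afterwards.
//
//   sigset_t set, oset;
//   ::sigemptyset(&set);
//   ::sigaddset(&set, SIGTRAP);
//   set_thread_signal_mask(SIG_BLOCK, &set, &oset);    // block, save old mask
//   // ... code that must not be interrupted by SIGTRAP ...
//   set_thread_signal_mask(SIG_SETMASK, &oset, NULL);  // restore old mask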
2942
2943// Function to unblock all signals which are, according
2944// to POSIX, typical program error signals. If they happen while being blocked,
2945// they typically will bring down the process immediately.
2946bool unblock_program_error_signals() {
2947  sigset_t set;
2948  ::sigemptyset(&set);
2949  ::sigaddset(&set, SIGILL);
2950  ::sigaddset(&set, SIGBUS);
2951  ::sigaddset(&set, SIGFPE);
2952  ::sigaddset(&set, SIGSEGV);
2953  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2954}
2955
2956// Renamed from 'signalHandler' to avoid collision with other shared libs.
2957void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2958  assert(info != NULL && uc != NULL, "it must be old kernel");
2959
2960  // Never leave program error signals blocked;
2961  // on all our platforms they would bring down the process immediately when
2962  // getting raised while being blocked.
2963  unblock_program_error_signals();
2964
2965  int orig_errno = errno;  // Preserve errno value over signal handler.
2966  JVM_handle_aix_signal(sig, info, uc, true);
2967  errno = orig_errno;
2968}
2969
2970// This boolean allows users to forward their own non-matching signals
2971// to JVM_handle_aix_signal, harmlessly.
2972bool os::Aix::signal_handlers_are_installed = false;
2973
2974// For signal-chaining
2975struct sigaction sigact[NSIG];
2976sigset_t sigs;
2977bool os::Aix::libjsig_is_loaded = false;
2978typedef struct sigaction *(*get_signal_t)(int);
2979get_signal_t os::Aix::get_signal_action = NULL;
2980
2981struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2982  struct sigaction *actp = NULL;
2983
2984  if (libjsig_is_loaded) {
2985    // Retrieve the old signal handler from libjsig
2986    actp = (*get_signal_action)(sig);
2987  }
2988  if (actp == NULL) {
2989    // Retrieve the preinstalled signal handler from jvm
2990    actp = get_preinstalled_handler(sig);
2991  }
2992
2993  return actp;
2994}
2995
2996static bool call_chained_handler(struct sigaction *actp, int sig,
2997                                 siginfo_t *siginfo, void *context) {
2998  // Call the old signal handler
2999  if (actp->sa_handler == SIG_DFL) {
3000    // It's more reasonable to let jvm treat it as an unexpected exception
3001    // instead of taking the default action.
3002    return false;
3003  } else if (actp->sa_handler != SIG_IGN) {
3004    if ((actp->sa_flags & SA_NODEFER) == 0) {
3005      // automatically block the signal
3006      sigaddset(&(actp->sa_mask), sig);
3007    }
3008
3009    sa_handler_t hand = NULL;
3010    sa_sigaction_t sa = NULL;
3011    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3012    // retrieve the chained handler
3013    if (siginfo_flag_set) {
3014      sa = actp->sa_sigaction;
3015    } else {
3016      hand = actp->sa_handler;
3017    }
3018
3019    if ((actp->sa_flags & SA_RESETHAND) != 0) {
3020      actp->sa_handler = SIG_DFL;
3021    }
3022
3023    // try to honor the signal mask
3024    sigset_t oset;
3025    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3026
3027    // call into the chained handler
3028    if (siginfo_flag_set) {
3029      (*sa)(sig, siginfo, context);
3030    } else {
3031      (*hand)(sig);
3032    }
3033
3034    // restore the signal mask
3035    pthread_sigmask(SIG_SETMASK, &oset, 0);
3036  }
3037  // Tell jvm's signal handler the signal is taken care of.
3038  return true;
3039}
3040
3041bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3042  bool chained = false;
3043  // signal-chaining
3044  if (UseSignalChaining) {
3045    struct sigaction *actp = get_chained_signal_action(sig);
3046    if (actp != NULL) {
3047      chained = call_chained_handler(actp, sig, siginfo, context);
3048    }
3049  }
3050  return chained;
3051}
3052
3053struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3054  if (sigismember(&sigs, sig)) {
3055    return &sigact[sig];
3056  }
3057  return NULL;
3058}
3059
3060void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3061  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3062  sigact[sig] = oldAct;
3063  sigaddset(&sigs, sig);
3064}
3065
3066// for diagnostic
3067int sigflags[NSIG];
3068
3069int os::Aix::get_our_sigflags(int sig) {
3070  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3071  return sigflags[sig];
3072}
3073
3074void os::Aix::set_our_sigflags(int sig, int flags) {
3075  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3076  if (sig > 0 && sig < NSIG) {
3077    sigflags[sig] = flags;
3078  }
3079}
3080
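      // Install the VM signal handler for sig (set_installed == true) or restore
      // SIG_DFL (set_installed == false). A pre-existing third-party handler is
      // either left alone (AllowUserSignalHandlers), saved for chaining
      // (UseSignalChaining), or treated as a fatal error.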
3081void os::Aix::set_signal_handler(int sig, bool set_installed) {
3082  // Check for overwrite.
3083  struct sigaction oldAct;
3084  sigaction(sig, (struct sigaction*)NULL, &oldAct);
3085
3086  void* oldhand = oldAct.sa_sigaction
3087    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3088    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3089  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3090      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3091      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3092    if (AllowUserSignalHandlers || !set_installed) {
3093      // Do not overwrite; user takes responsibility to forward to us.
3094      return;
3095    } else if (UseSignalChaining) {
3096      // save the old handler in jvm
3097      save_preinstalled_handler(sig, oldAct);
3098      // libjsig also interposes the sigaction() call below and saves the
3099      // old sigaction on its own.
3100    } else {
3101      fatal("Encountered unexpected pre-existing sigaction handler "
3102            "%#lx for signal %d.", (long)oldhand, sig);
3103    }
3104  }
3105
3106  struct sigaction sigAct;
3107  sigfillset(&(sigAct.sa_mask));
3108  if (!set_installed) {
3109    sigAct.sa_handler = SIG_DFL;
3110    sigAct.sa_flags = SA_RESTART;
3111  } else {
3112    sigAct.sa_sigaction = javaSignalHandler;
3113    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3114  }
3115  // Save the flags we set, for later verification.
3116  assert(sig > 0 && sig < NSIG, "vm signal out of expected range");
3117  sigflags[sig] = sigAct.sa_flags;
3118
3119  int ret = sigaction(sig, &sigAct, &oldAct);
3120  assert(ret == 0, "check");
3121
3122  void* oldhand2 = oldAct.sa_sigaction
3123                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3124                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3125  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3126}
3127
3128// install signal handlers for signals that HotSpot needs to
3129// handle in order to support Java-level exception handling.
3130void os::Aix::install_signal_handlers() {
3131  if (!signal_handlers_are_installed) {
3132    signal_handlers_are_installed = true;
3133
3134    // signal-chaining
3135    typedef void (*signal_setting_t)();
3136    signal_setting_t begin_signal_setting = NULL;
3137    signal_setting_t end_signal_setting = NULL;
3138    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3139                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3140    if (begin_signal_setting != NULL) {
3141      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3142                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3143      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3144                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3145      libjsig_is_loaded = true;
3146      assert(UseSignalChaining, "should enable signal-chaining");
3147    }
3148    if (libjsig_is_loaded) {
3149      // Tell libjsig the jvm is setting signal handlers.
3150      (*begin_signal_setting)();
3151    }
3152
3153    ::sigemptyset(&sigs);
3154    set_signal_handler(SIGSEGV, true);
3155    set_signal_handler(SIGPIPE, true);
3156    set_signal_handler(SIGBUS, true);
3157    set_signal_handler(SIGILL, true);
3158    set_signal_handler(SIGFPE, true);
3159    set_signal_handler(SIGTRAP, true);
3160    set_signal_handler(SIGXFSZ, true);
3161    set_signal_handler(SIGDANGER, true);
3162
3163    if (libjsig_is_loaded) {
3164      // Tell libjsig the jvm has finished setting signal handlers.
3165      (*end_signal_setting)();
3166    }
3167
3168    // We don't activate the signal checker if libjsig is in place; we trust
3169    // ourselves, and if a user signal handler is installed, all bets are off.
3170    // Log that signal checking is off only if -verbose:jni is specified.
3171    if (CheckJNICalls) {
3172      if (libjsig_is_loaded) {
3173        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3174        check_signals = false;
3175      }
3176      if (AllowUserSignalHandlers) {
3177        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3178        check_signals = false;
3179      }
3180      // Need to initialize check_signal_done.
3181      ::sigemptyset(&check_signal_done);
3182    }
3183  }
3184}
3185
3186static const char* get_signal_handler_name(address handler,
3187                                           char* buf, int buflen) {
3188  int offset;
3189  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3190  if (found) {
3191    // skip directory names
3192    const char *p1, *p2;
3193    p1 = buf;
3194    size_t len = strlen(os::file_separator());
3195    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3196    // The way os::dll_address_to_library_name is implemented on Aix
3197    // right now, it always returns -1 for the offset, which is not
3198    // terribly informative.
3199    // Will fix that. For now, omit the offset.
3200    jio_snprintf(buf, buflen, "%s", p1);
3201  } else {
3202    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3203  }
3204  return buf;
3205}
3206
3207static void print_signal_handler(outputStream* st, int sig,
3208                                 char* buf, size_t buflen) {
3209  struct sigaction sa;
3210  sigaction(sig, NULL, &sa);
3211
3212  st->print("%s: ", os::exception_name(sig, buf, buflen));
3213
3214  address handler = (sa.sa_flags & SA_SIGINFO)
3215    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3216    : CAST_FROM_FN_PTR(address, sa.sa_handler);
3217
3218  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3219    st->print("SIG_DFL");
3220  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3221    st->print("SIG_IGN");
3222  } else {
3223    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3224  }
3225
3226  // Print readable mask.
3227  st->print(", sa_mask[0]=");
3228  os::Posix::print_signal_set_short(st, &sa.sa_mask);
3229
3230  address rh = VMError::get_resetted_sighandler(sig);
3231  // Maybe the handler was reset by VMError?
3232  if (rh != NULL) {
3233    handler = rh;
3234    sa.sa_flags = VMError::get_resetted_sigflags(sig);
3235  }
3236
3237  // Print textual representation of sa_flags.
3238  st->print(", sa_flags=");
3239  os::Posix::print_sa_flags(st, sa.sa_flags);
3240
3241  // Check: is it our handler?
3242  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3243      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3244    // It is our signal handler.
3245    // Check for flags, reset system-used one!
3246    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3247      st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
3248                os::Aix::get_our_sigflags(sig));
3249    }
3250  }
3251  st->cr();
3252}
3253
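      // Check a signal only as long as it is not in check_signal_done;
      // check_signal_handler() adds it there after reporting a mismatch, so
      // each signal is warned about at most once.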
3254#define DO_SIGNAL_CHECK(sig) \
3255  if (!sigismember(&check_signal_done, sig)) \
3256    os::Aix::check_signal_handler(sig)
3257
3258// This method is a periodic task to check for misbehaving JNI applications
3259  // under CheckJNI; we can add any periodic checks here.
3260
3261void os::run_periodic_checks() {
3262
3263  if (check_signals == false) return;
3264
3265  // An overridden SEGV or BUS handler could potentially prevent
3266  // generation of hs*.log in the event of a crash. Debugging
3267  // such a case can be very challenging, so we absolutely
3268  // check the following for good measure:
3269  DO_SIGNAL_CHECK(SIGSEGV);
3270  DO_SIGNAL_CHECK(SIGILL);
3271  DO_SIGNAL_CHECK(SIGFPE);
3272  DO_SIGNAL_CHECK(SIGBUS);
3273  DO_SIGNAL_CHECK(SIGPIPE);
3274  DO_SIGNAL_CHECK(SIGXFSZ);
3275  if (UseSIGTRAP) {
3276    DO_SIGNAL_CHECK(SIGTRAP);
3277  }
3278  DO_SIGNAL_CHECK(SIGDANGER);
3279
3280  // ReduceSignalUsage allows the user to override these handlers
3281  // see comments at the very top and jvm_solaris.h
3282  if (!ReduceSignalUsage) {
3283    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3284    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3285    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3286    DO_SIGNAL_CHECK(BREAK_SIGNAL);
3287  }
3288
3289  DO_SIGNAL_CHECK(SR_signum);
3290}
3291
3292typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3293
3294static os_sigaction_t os_sigaction = NULL;
3295
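      // Compare the currently installed handler for sig against the handler the
      // VM expects (its own handler, or the user handler for the shutdown/break
      // signals) and warn if either the handler or its sa_flags were changed.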
3296void os::Aix::check_signal_handler(int sig) {
3297  char buf[O_BUFLEN];
3298  address jvmHandler = NULL;
3299
3300  struct sigaction act;
3301  if (os_sigaction == NULL) {
3302    // only trust the default sigaction, in case it has been interposed
3303    os_sigaction = CAST_TO_FN_PTR(os_sigaction_t, dlsym(RTLD_DEFAULT, "sigaction"));
3304    if (os_sigaction == NULL) return;
3305  }
3306
3307  os_sigaction(sig, (struct sigaction*)NULL, &act);
3308
3309  address thisHandler = (act.sa_flags & SA_SIGINFO)
3310    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3311    : CAST_FROM_FN_PTR(address, act.sa_handler);
3312
3313  switch(sig) {
3314  case SIGSEGV:
3315  case SIGBUS:
3316  case SIGFPE:
3317  case SIGPIPE:
3318  case SIGILL:
3319  case SIGXFSZ:
3320    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3321    break;
3322
3323  case SHUTDOWN1_SIGNAL:
3324  case SHUTDOWN2_SIGNAL:
3325  case SHUTDOWN3_SIGNAL:
3326  case BREAK_SIGNAL:
3327    jvmHandler = (address)user_handler();
3328    break;
3329
3330  default:
3331    if (sig == SR_signum) {
3332      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3333    } else {
3334      return;
3335    }
3336    break;
3337  }
3338
3339  if (thisHandler != jvmHandler) {
3340    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3341    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3342    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3343    // No need to check this sig any longer
3344    sigaddset(&check_signal_done, sig);
3345    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN.
3346    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3347      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3348                    exception_name(sig, buf, O_BUFLEN));
3349    }
3350  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3351    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3352    tty->print("expected:");
3353    os::Posix::print_sa_flags(tty, os::Aix::get_our_sigflags(sig));
3354    tty->cr();
3355    tty->print("  found:");
3356    os::Posix::print_sa_flags(tty, act.sa_flags);
3357    tty->cr();
3358    // No need to check this sig any longer
3359    sigaddset(&check_signal_done, sig);
3360  }
3361
3362  // Dump all the signal handlers.
3363  if (sigismember(&check_signal_done, sig)) {
3364    print_signal_handlers(tty, buf, O_BUFLEN);
3365  }
3366}
3367
3368// To install functions for atexit system call
3369extern "C" {
3370  static void perfMemory_exit_helper() {
3371    perfMemory_exit();
3372  }
3373}
3374
3375  // This is called _before_ most of the global arguments have been parsed.
3376void os::init(void) {
3377  // This is basic, we want to know if that ever changes.
3378  // (The shared memory boundary is supposed to be 256M aligned.)
3379  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3380
3381  // Record process break at startup.
3382  g_brk_at_startup = (address) ::sbrk(0);
3383  assert(g_brk_at_startup != (address) -1, "sbrk failed");
3384
3385  // First off, we need to know whether we run on AIX or PASE, and
3386  // the OS level we run on.
3387  os::Aix::initialize_os_info();
3388
3389  // Scan environment (SPEC1170 behaviour, etc).
3390  os::Aix::scan_environment();
3391
3392  // Probe multipage support.
3393  query_multipage_support();
3394
3395  // Act like we only have one page size by eliminating corner cases which
3396  // we did not support very well anyway.
3397  // We have two input conditions:
3398  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3399  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3400  //    setting.
3401  //    Data segment page size is important for us because it defines the thread stack page
3402  //    size, which is needed for guard page handling, stack banging etc.
3403  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3404  //    and should be allocated with 64k pages.
3405  //
3406  // So, we do the following:
3407  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3408  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3409  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3410  // 64k          no              --- AIX 5.2 ? ---
3411  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3412
3413  // We explicitly leave no option to change page size, because only upgrading would work,
3414  // not downgrading (if the stack page size is 64k you cannot pretend it's 4k).
3415
3416  if (g_multipage_support.datapsize == SIZE_4K) {
3417    // datapsize = 4K. Data segment, thread stacks are 4K paged.
3418    if (g_multipage_support.can_use_64K_pages) {
3419      // .. but we are able to use 64K pages dynamically.
3420      // This would be typical for java launchers which are not linked
3421      // with datapsize=64K (like, any other launcher but our own).
3422      //
3423      // In this case it would be smart to allocate the java heap with 64K
3424      // to get the performance benefit, and to fake 64k pages for the
3425      // data segment (when dealing with thread stacks).
3426      //
3427      // However, leave a possibility to downgrade to 4K, using
3428      // -XX:-Use64KPages.
3429      if (Use64KPages) {
3430        trcVerbose("64K page mode (faked for data segment)");
3431        Aix::_page_size = SIZE_64K;
3432      } else {
3433        trcVerbose("4K page mode (Use64KPages=off)");
3434        Aix::_page_size = SIZE_4K;
3435      }
3436    } else {
3437      // .. and not able to allocate 64k pages dynamically. Here, just
3438      // fall back to 4K paged mode and use mmap for everything.
3439      trcVerbose("4K page mode");
3440      Aix::_page_size = SIZE_4K;
3441      FLAG_SET_ERGO(bool, Use64KPages, false);
3442    }
3443  } else {
3444    // datapsize = 64k. Data segment, thread stacks are 64k paged.
3445    // This normally means that we can allocate 64k pages dynamically.
3446    // (There is one special case where this may be false: EXTSHM=on,
3447    // but we decided not to support that mode.)
3448    assert0(g_multipage_support.can_use_64K_pages);
3449    Aix::_page_size = SIZE_64K;
3450    trcVerbose("64K page mode");
3451    FLAG_SET_ERGO(bool, Use64KPages, true);
3452  }
3453
3454  // Hard-wire the stack page size to the base page size; if that works out, we
3455  // can remove the notion of a separate stack page size altogether.
3456  Aix::_stack_page_size = Aix::_page_size;
3457
3458  // For now UseLargePages is just ignored.
3459  FLAG_SET_ERGO(bool, UseLargePages, false);
3460  _page_sizes[0] = 0;
3461
3462  // debug trace
3463  trcVerbose("os::vm_page_size %s", describe_pagesize(os::vm_page_size()));
3464
3465  // Next, we need to initialize libo4 and libperfstat libraries.
3466  if (os::Aix::on_pase()) {
3467    os::Aix::initialize_libo4();
3468  } else {
3469    os::Aix::initialize_libperfstat();
3470  }
3471
3472  // Reset the perfstat information provided by ODM.
3473  if (os::Aix::on_aix()) {
3474    libperfstat::perfstat_reset();
3475  }
3476
3477  // Now initialize basic system properties. Note that for some of the values we
3478  // need libperfstat etc.
3479  os::Aix::initialize_system_info();
3480
3481  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3482
3483  init_random(1234567);
3484
3485  ThreadCritical::initialize();
3486
3487  // Main_thread points to the aboriginal thread.
3488  Aix::_main_thread = pthread_self();
3489
3490  initial_time_count = os::elapsed_counter();
3491}
3492
3493// This is called _after_ the global arguments have been parsed.
3494jint os::init_2(void) {
3495
3496  if (os::Aix::on_pase()) {
3497    trcVerbose("Running on PASE.");
3498  } else {
3499    trcVerbose("Running on AIX (not PASE).");
3500  }
3501
3502  trcVerbose("processor count: %d", os::_processor_count);
3503  trcVerbose("physical memory: %lu", Aix::_physical_memory);
3504
3505  // Initially build up the loaded dll map.
3506  LoadedLibraries::reload();
3507  if (Verbose) {
3508    trcVerbose("Loaded Libraries: ");
3509    LoadedLibraries::print(tty);
3510  }
3511
3512  const int page_size = Aix::page_size();
3513  const int map_size = page_size;
3514
3515  address map_address = (address) MAP_FAILED;
3516  const int prot  = PROT_READ;
3517  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3518
3519  // Use optimized addresses for the polling page,
3520  // e.g. map it to a special 32-bit address.
3521  if (OptimizePollingPageLocation) {
3522    // architecture-specific list of address wishes:
3523    address address_wishes[] = {
3524      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3525      // PPC64: all address wishes are non-negative 32 bit values where
3526      // the lower 16 bits are all zero. We can load these addresses
3527      // with a single ppc_lis instruction.
3528      (address) 0x30000000, (address) 0x31000000,
3529      (address) 0x32000000, (address) 0x33000000,
3530      (address) 0x40000000, (address) 0x41000000,
3531      (address) 0x42000000, (address) 0x43000000,
3532      (address) 0x50000000, (address) 0x51000000,
3533      (address) 0x52000000, (address) 0x53000000,
3534      (address) 0x60000000, (address) 0x61000000,
3535      (address) 0x62000000, (address) 0x63000000
3536    };
3537    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3538
3539    // iterate over the list of address wishes:
3540    for (int i=0; i<address_wishes_length; i++) {
3541      // Try to map with current address wish.
3542      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3543      // fail if the address is already mapped.
3544      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3545                                     map_size, prot,
3546                                     flags | MAP_FIXED,
3547                                     -1, 0);
3548      trcVerbose("SafePoint Polling Page address: %p (wish) => %p",
3549                   address_wishes[i], map_address + (ssize_t)page_size);
3550
3551      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3552        // Map succeeded and map_address is at wished address, exit loop.
3553        break;
3554      }
3555
3556      if (map_address != (address) MAP_FAILED) {
3557        // Map succeeded, but polling_page is not at wished address, unmap and continue.
3558        ::munmap(map_address, map_size);
3559        map_address = (address) MAP_FAILED;
3560      }
3561      // Map failed, continue loop.
3562    }
3563  } // end OptimizePollingPageLocation
3564
3565  if (map_address == (address) MAP_FAILED) {
3566    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3567  }
3568  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3569  os::set_polling_page(map_address);
3570
3571  if (!UseMembar) {
3572    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3573    guarantee(mem_serialize_page != (address) MAP_FAILED, "mmap failed for memory serialize page");
3574    os::set_memory_serialize_page(mem_serialize_page);
3575
3576    trcVerbose("Memory Serialize Page address: %p - %p, size %IX (%IB)",
3577        mem_serialize_page, mem_serialize_page + Aix::page_size(),
3578        Aix::page_size(), Aix::page_size());
3579  }
3580
3581  // initialize suspend/resume support - must do this before signal_sets_init()
3582  if (SR_initialize() != 0) {
3583    perror("SR_initialize failed");
3584    return JNI_ERR;
3585  }
3586
3587  Aix::signal_sets_init();
3588  Aix::install_signal_handlers();
3589
3590  // Check the minimum allowable stack size for thread creation and for
3591  // initializing the java system classes, including StackOverflowError;
3592  // this depends on the page size. Add a page for compiler2 recursion in the main thread.
3593  // Add in 2*BytesPerWord times page size to account for VM stack during
3594  // class initialization depending on 32 or 64 bit VM.
3595  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3596                                    JavaThread::stack_guard_zone_size() +
3597                                    JavaThread::stack_shadow_zone_size() +
3598                                    (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3599
3600  os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3601
3602  size_t threadStackSizeInBytes = ThreadStackSize * K;
3603  if (threadStackSizeInBytes != 0 &&
3604      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3605    tty->print_cr("\nThe stack size specified is too small. "
3606                  "Specify at least %dk",
3607                  os::Aix::min_stack_allowed / K);
3608    return JNI_ERR;
3609  }
3610
3611  // Make the stack size a multiple of the page size so that
3612  // the yellow/red zones can be guarded.
3613  // Note that this can be 0, if no default stacksize was set.
3614  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3615
3616  if (UseNUMA) {
3617    UseNUMA = false;
3618    warning("NUMA optimizations are not available on this OS.");
3619  }
3620
3621  if (MaxFDLimit) {
3622    // Set the number of file descriptors to max. print out error
3623    // if getrlimit/setrlimit fails but continue regardless.
3624    struct rlimit nbr_files;
3625    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3626    if (status != 0) {
3627      log_info(os)("os::init_2 getrlimit failed: %s", os::strerror(errno));
3628    } else {
3629      nbr_files.rlim_cur = nbr_files.rlim_max;
3630      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3631      if (status != 0) {
3632        log_info(os)("os::init_2 setrlimit failed: %s", os::strerror(errno));
3633      }
3634    }
3635  }
3636
3637  if (PerfAllowAtExitRegistration) {
3638    // Only register atexit functions if PerfAllowAtExitRegistration is set.
3639    // At exit functions can be delayed until process exit time, which
3640    // can be problematic for embedded VM situations. Embedded VMs should
3641    // call DestroyJavaVM() to assure that VM resources are released.
3642
3643    // Note: perfMemory_exit_helper atexit function may be removed in
3644    // the future if the appropriate cleanup code can be added to the
3645    // VM_Exit VMOperation's doit method.
3646    if (atexit(perfMemory_exit_helper) != 0) {
3647      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3648    }
3649  }
3650
3651  return JNI_OK;
3652}
3653
3654// Mark the polling page as unreadable
3655void os::make_polling_page_unreadable(void) {
3656  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3657    fatal("Could not disable polling page");
3658  }
3659};
3660
3661// Mark the polling page as readable
3662void os::make_polling_page_readable(void) {
3663  // Changed according to os_linux.cpp.
3664  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3665    fatal("Could not enable polling page at " PTR_FORMAT, _polling_page);
3666  }
3667};
3668
3669int os::active_processor_count() {
3670  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3671  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3672  return online_cpus;
3673}
3674
3675void os::set_native_thread_name(const char *name) {
3676  // Not yet implemented.
3677  return;
3678}
3679
3680bool os::distribute_processes(uint length, uint* distribution) {
3681  // Not yet implemented.
3682  return false;
3683}
3684
3685bool os::bind_to_processor(uint processor_id) {
3686  // Not yet implemented.
3687  return false;
3688}
3689
3690void os::SuspendedThreadTask::internal_do_task() {
3691  if (do_suspend(_thread->osthread())) {
3692    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3693    do_task(context);
3694    do_resume(_thread->osthread());
3695  }
3696}
3697
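      // Helper: suspends the target thread via the SuspendedThreadTask
      // machinery above and samples its PC from the saved ucontext.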
3698class PcFetcher : public os::SuspendedThreadTask {
3699public:
3700  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3701  ExtendedPC result();
3702protected:
3703  void do_task(const os::SuspendedThreadTaskContext& context);
3704private:
3705  ExtendedPC _epc;
3706};
3707
3708ExtendedPC PcFetcher::result() {
3709  guarantee(is_done(), "task is not done yet.");
3710  return _epc;
3711}
3712
3713void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3714  Thread* thread = context.thread();
3715  OSThread* osthread = thread->osthread();
3716  if (osthread->ucontext() != NULL) {
3717    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
3718  } else {
3719    // NULL context is unexpected, double-check this is the VMThread.
3720    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3721  }
3722}
3723
3724// Suspends the target using the signal mechanism and then grabs the PC before
3725// resuming the target. Used by the flat-profiler only
3726ExtendedPC os::get_thread_pc(Thread* thread) {
3727  // Make sure that it is called by the watcher for the VMThread.
3728  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3729  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3730
3731  PcFetcher fetcher(thread);
3732  fetcher.run();
3733  return fetcher.result();
3734}
3735
3736////////////////////////////////////////////////////////////////////////////////
3737// debug support
3738
3739bool os::find(address addr, outputStream* st) {
3740
3741  st->print(PTR_FORMAT ": ", addr);
3742
3743  loaded_module_t lm;
3744  if (LoadedLibraries::find_for_text_address(addr, &lm) != NULL ||
3745      LoadedLibraries::find_for_data_address(addr, &lm) != NULL) {
3746    st->print_cr("%s", lm.path);
3747    return true;
3748  }
3749
3750  return false;
3751}
3752
3753////////////////////////////////////////////////////////////////////////////////
3754// misc
3755
3756// This does not do anything on Aix. This is basically a hook for being
3757// able to use structured exception handling (thread-local exception filters)
3758// on, e.g., Win32.
3759void
3760os::os_exception_wrapper(java_call_t f, JavaValue* value, const methodHandle& method,
3761                         JavaCallArguments* args, Thread* thread) {
3762  f(value, method, args, thread);
3763}
3764
3765void os::print_statistics() {
3766}
3767
3768bool os::message_box(const char* title, const char* message) {
3769  int i;
3770  fdStream err(defaultStream::error_fd());
3771  for (i = 0; i < 78; i++) err.print_raw("=");
3772  err.cr();
3773  err.print_raw_cr(title);
3774  for (i = 0; i < 78; i++) err.print_raw("-");
3775  err.cr();
3776  err.print_raw_cr(message);
3777  for (i = 0; i < 78; i++) err.print_raw("=");
3778  err.cr();
3779
3780  char buf[16];
3781  // Prevent the process from exiting upon "read error", but avoid consuming all CPU while doing so.
3782  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3783
3784  return buf[0] == 'y' || buf[0] == 'Y';
3785}
3786
3787int os::stat(const char *path, struct stat *sbuf) {
3788  char pathbuf[MAX_PATH];
3789  if (strlen(path) > MAX_PATH - 1) {
3790    errno = ENAMETOOLONG;
3791    return -1;
3792  }
3793  os::native_path(strcpy(pathbuf, path));
3794  return ::stat(pathbuf, sbuf);
3795}
3796
3797bool os::check_heap(bool force) {
3798  return true;
3799}
3800
3801// Is a (classpath) directory empty?
3802bool os::dir_is_empty(const char* path) {
3803  DIR *dir = NULL;
3804  struct dirent *ptr;
3805
3806  dir = opendir(path);
3807  if (dir == NULL) return true;
3808
3809  /* Scan the directory */
3810  bool result = true;
3811  char buf[sizeof(struct dirent) + MAX_PATH];
3812  while (result && (ptr = ::readdir(dir)) != NULL) {
3813    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3814      result = false;
3815    }
3816  }
3817  closedir(dir);
3818  return result;
3819}
3820
3821// This code originates from JDK's sysOpen and open64_w
3822// from src/solaris/hpi/src/system_md.c
3823
3824int os::open(const char *path, int oflag, int mode) {
3825
3826  if (strlen(path) > MAX_PATH - 1) {
3827    errno = ENAMETOOLONG;
3828    return -1;
3829  }
3830  int fd;
3831
3832  fd = ::open64(path, oflag, mode);
3833  if (fd == -1) return -1;
3834
3835  // If the open succeeded, the file might still be a directory.
3836  {
3837    struct stat64 buf64;
3838    int ret = ::fstat64(fd, &buf64);
3839    int st_mode = buf64.st_mode;
3840
3841    if (ret != -1) {
3842      if ((st_mode & S_IFMT) == S_IFDIR) {
3843        errno = EISDIR;
3844        ::close(fd);
3845        return -1;
3846      }
3847    } else {
3848      ::close(fd);
3849      return -1;
3850    }
3851  }
3852
3853  // All file descriptors that are opened in the JVM and not
3854  // specifically destined for a subprocess should have the
3855  // close-on-exec flag set. If we don't set it, then careless 3rd
3856  // party native code might fork and exec without closing all
3857  // appropriate file descriptors (e.g. as we do in closeDescriptors in
3858  // UNIXProcess.c), and this in turn might:
3859  //
3860  // - cause end-of-file to fail to be detected on some file
3861  //   descriptors, resulting in mysterious hangs, or
3862  //
3863  // - might cause an fopen in the subprocess to fail on a system
3864  //   suffering from bug 1085341.
3865  //
3866  // (Yes, the default setting of the close-on-exec flag is a Unix
3867  // design flaw.)
3868  //
3869  // See:
3870  // 1085341: 32-bit stdio routines should support file descriptors >255
3871  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3872  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3873#ifdef FD_CLOEXEC
3874  {
3875    int flags = ::fcntl(fd, F_GETFD);
3876    if (flags != -1)
3877      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3878  }
3879#endif
3880
3881  return fd;
3882}
3883
3884// create binary file, rewriting existing file if required
3885int os::create_binary_file(const char* path, bool rewrite_existing) {
3886  int oflags = O_WRONLY | O_CREAT;
3887  if (!rewrite_existing) {
3888    oflags |= O_EXCL;
3889  }
3890  return ::open64(path, oflags, S_IREAD | S_IWRITE);
3891}
3892
3893// return current position of file pointer
3894jlong os::current_file_offset(int fd) {
3895  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3896}
3897
3898// move file pointer to the specified offset
3899jlong os::seek_to_file_offset(int fd, jlong offset) {
3900  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3901}
3902
3903// This code originates from JDK's sysAvailable
3904// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3905
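      // Strategy: for character devices, FIFOs and sockets ask the driver via
      // ioctl(FIONREAD); for everything else compute the remaining bytes from
      // the current offset and the end of the file using lseek64.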
3906int os::available(int fd, jlong *bytes) {
3907  jlong cur, end;
3908  int mode;
3909  struct stat64 buf64;
3910
3911  if (::fstat64(fd, &buf64) >= 0) {
3912    mode = buf64.st_mode;
3913    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3914      int n;
3915      if (::ioctl(fd, FIONREAD, &n) >= 0) {
3916        *bytes = n;
3917        return 1;
3918      }
3919    }
3920  }
3921  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3922    return 0;
3923  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3924    return 0;
3925  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3926    return 0;
3927  }
3928  *bytes = end - cur;
3929  return 1;
3930}
3931
3932// Map a block of memory.
3933char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3934                        char *addr, size_t bytes, bool read_only,
3935                        bool allow_exec) {
3936  int prot;
3937  int flags = MAP_PRIVATE;
3938
3939  if (read_only) {
3940    prot = PROT_READ;
3941    flags = MAP_SHARED;
3942  } else {
3943    prot = PROT_READ | PROT_WRITE;
3944    flags = MAP_PRIVATE;
3945  }
3946
3947  if (allow_exec) {
3948    prot |= PROT_EXEC;
3949  }
3950
3951  if (addr != NULL) {
3952    flags |= MAP_FIXED;
3953  }
3954
3955  // Allow anonymous mappings if 'fd' is -1.
3956  if (fd == -1) {
3957    flags |= MAP_ANONYMOUS;
3958  }
3959
3960  char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
3961                                     fd, file_offset);
3962  if (mapped_address == MAP_FAILED) {
3963    return NULL;
3964  }
3965  return mapped_address;
3966}
3967
3968// Remap a block of memory.
3969char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
3970                          char *addr, size_t bytes, bool read_only,
3971                          bool allow_exec) {
3972  // same as map_memory() on this OS
3973  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
3974                        allow_exec);
3975}
3976
3977// Unmap a block of memory.
3978bool os::pd_unmap_memory(char* addr, size_t bytes) {
3979  return munmap(addr, bytes) == 0;
3980}
3981
3982// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
3983// are used by JVM M&M and JVMTI to get user+sys or user CPU time
3984// of a thread.
3985//
3986// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
3987// the fast estimate available on the platform.
3988
3989jlong os::current_thread_cpu_time() {
3990  // return user + sys since the cost is the same
3991  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
3992  assert(n >= 0, "negative CPU time");
3993  return n;
3994}
3995
3996jlong os::thread_cpu_time(Thread* thread) {
3997  // consistent with what current_thread_cpu_time() returns
3998  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
3999  assert(n >= 0, "negative CPU time");
4000  return n;
4001}
4002
4003jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4004  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4005  assert(n >= 0, "negative CPU time");
4006  return n;
4007}
4008
4009static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4010  bool error = false;
4011
4012  jlong sys_time = 0;
4013  jlong user_time = 0;
4014
4015  // Reimplemented using getthrds64().
4016  //
4017  // Works like this:
4018  // For the thread in question, get the kernel thread id. Then get the
4019  // kernel thread statistics using that id.
4020  //
4021  // This only works, of course, when no pthread scheduling is used,
4022  // i.e. there is a 1:1 relationship to kernel threads.
4023  // On AIX, see AIXTHREAD_SCOPE variable.
4024
4025  pthread_t pthtid = thread->osthread()->pthread_id();
4026
4027  // retrieve kernel thread id for the pthread:
4028  tid64_t tid = 0;
4029  struct __pthrdsinfo pinfo;
4030  // I just love those otherworldly IBM APIs which force me to hand down
4031  // dummy buffers for stuff I don't care about...
4032  char dummy[1];
4033  int dummy_size = sizeof(dummy);
4034  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4035                          dummy, &dummy_size) == 0) {
4036    tid = pinfo.__pi_tid;
4037  } else {
4038    tty->print_cr("pthread_getthrds_np failed.");
4039    error = true;
4040  }
4041
4042  // retrieve kernel timing info for that kernel thread
4043  if (!error) {
4044    struct thrdentry64 thrdentry;
4045    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
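            // The rusage counters are converted to nanoseconds here
            // (tv_sec in seconds, tv_usec taken as microseconds).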
4046      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4047      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4048    } else {
4049      tty->print_cr("getthrds64 failed.");
4050      error = true;
4051    }
4052  }
4053
4054  if (p_sys_time) {
4055    *p_sys_time = sys_time;
4056  }
4057
4058  if (p_user_time) {
4059    *p_user_time = user_time;
4060  }
4061
4062  if (error) {
4063    return false;
4064  }
4065
4066  return true;
4067}
4068
4069jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4070  jlong sys_time;
4071  jlong user_time;
4072
4073  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4074    return -1;
4075  }
4076
4077  return user_sys_cpu_time ? sys_time + user_time : user_time;
4078}
4079
4080void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4081  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4082  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4083  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4084  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4085}
4086
4087void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4088  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4089  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4090  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4091  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4092}
4093
4094bool os::is_thread_cpu_time_supported() {
4095  return true;
4096}
4097
4098// System loadavg support. Returns -1 if load average cannot be obtained.
4099// For now just return the system wide load average (no processor sets).
4100int os::loadavg(double values[], int nelem) {
4101
4102  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4103  guarantee(values, "argument error");
4104
4105  if (os::Aix::on_pase()) {
4106
4107    // AS/400 PASE: use libo4 porting library
4108    double v[3] = { 0.0, 0.0, 0.0 };
4109
4110    if (libo4::get_load_avg(v, v + 1, v + 2)) {
4111      for (int i = 0; i < nelem; i ++) {
4112        values[i] = v[i];
4113      }
4114      return nelem;
4115    } else {
4116      return -1;
4117    }
4118
4119  } else {
4120
4121    // AIX: use libperfstat
4122    libperfstat::cpuinfo_t ci;
4123    if (libperfstat::get_cpuinfo(&ci)) {
4124      for (int i = 0; i < nelem; i++) {
4125        values[i] = ci.loadavg[i];
4126      }
4127    } else {
4128      return -1;
4129    }
4130    return nelem;
4131  }
4132}
4133
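      // Pause the VM at startup: create a marker file (PauseAtStartupFile if
      // set, else ./vm.paused.<pid>) and poll until it is deleted, then continue.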
4134void os::pause() {
4135  char filename[MAX_PATH];
4136  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4137    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4138  } else {
4139    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4140  }
4141
4142  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4143  if (fd != -1) {
4144    struct stat buf;
4145    ::close(fd);
4146    while (::stat(filename, &buf) == 0) {
4147      (void)::poll(NULL, 0, 100);
4148    }
4149  } else {
4150    trcVerbose("Could not open pause file '%s', continuing immediately.", filename);
4151  }
4152}
4153
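      // On AIX, the primordial (aboriginal) thread has pthread id 1, which is
      // what the check below relies on.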
4154bool os::Aix::is_primordial_thread() {
4155  if (pthread_self() == (pthread_t)1) {
4156    return true;
4157  } else {
4158    return false;
4159  }
4160}
4161
4162  // OS recognition (PASE/AIX, OS level). Call this before calling any
4163  // of the Aix::on_pase(), Aix::os_version() statics.
4164void os::Aix::initialize_os_info() {
4165
4166  assert(_on_pase == -1 && _os_version == 0, "already called.");
4167
4168  struct utsname uts;
4169  memset(&uts, 0, sizeof(uts));
4170  strcpy(uts.sysname, "?");
4171  if (::uname(&uts) == -1) {
4172    trcVerbose("uname failed (%d)", errno);
4173    guarantee(0, "Could not determine whether we run on AIX or PASE");
4174  } else {
4175    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4176               "node \"%s\" machine \"%s\"\n",
4177               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4178    const int major = atoi(uts.version);
4179    assert(major > 0, "invalid OS version");
4180    const int minor = atoi(uts.release);
4181    assert(minor > 0, "invalid OS release");
4182    _os_version = (major << 24) | (minor << 16);
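          // Encode major and minor in the two most significant bytes. On AIX,
          // odmWrapper::determine_os_kernel_version() below adds modification
          // and fix level in the two least significant bytes.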
4183    char ver_str[20] = {0};
4184    char *name_str = "unknown OS";
4185    const char* name_str = "unknown OS";
4186      // We run on AS/400 PASE. We do not support versions older than V5R4M0.
4187      _on_pase = 1;
4188      if (os_version_short() < 0x0504) {
4189        trcVerbose("OS/400 releases older than V5R4M0 not supported.");
4190        assert(false, "OS/400 release too old.");
4191      }
4192      name_str = "OS/400 (pase)";
4193      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u", major, minor);
4194    } else if (strcmp(uts.sysname, "AIX") == 0) {
4195      // We run on AIX. We do not support versions older than AIX 5.3.
4196      _on_pase = 0;
4197      // Determine detailed AIX version: Version, Release, Modification, Fix Level.
4198      odmWrapper::determine_os_kernel_version(&_os_version);
4199      if (os_version_short() < 0x0503) {
4200        trcVerbose("AIX release older than AIX 5.3 not supported.");
4201        assert(false, "AIX release too old.");
4202      }
4203      name_str = "AIX";
4204      jio_snprintf(ver_str, sizeof(ver_str), "%u.%u.%u.%u",
4205                   major, minor, (_os_version >> 8) & 0xFF, _os_version & 0xFF);
4206    } else {
4207      assert(false, name_str);
4208    }
4209    trcVerbose("We run on %s %s", name_str, ver_str);
4210  }
4211
4212  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4213} // end: os::Aix::initialize_os_info()
4214
4215  // Scan environment for important settings which might affect the VM.
4216// Trace out settings. Warn about invalid settings and/or correct them.
4217//
4218  // Must run after os::Aix::initialize_os_info().
4219void os::Aix::scan_environment() {
4220
4221  char* p;
4222  int rc;
4223
4224  // Warn explicitly if EXTSHM=ON is used. That switch changes how
4225  // System V shared memory behaves. One effect is that the page size of
4226  // shared memory cannot be changed dynamically, effectively preventing
4227  // large pages from working.
4228  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4229  // recommendation is (in OSS notes) to switch it off.
4230  p = ::getenv("EXTSHM");
4231  trcVerbose("EXTSHM=%s.", p ? p : "<unset>");
4232  if (p && strcasecmp(p, "ON") == 0) {
4233    _extshm = 1;
4234    trcVerbose("*** Unsupported mode! Please remove EXTSHM from your environment! ***");
4235    if (!AllowExtshm) {
4236      // Under certain conditions we allow the user to continue. However, we want this
4237      // to be a fatal error by default. On certain AIX systems, leaving EXTSHM=ON means
4238      // that the VM is not able to allocate 64k pages for the heap.
4239      // We do not want to run with reduced performance.
4240      vm_exit_during_initialization("EXTSHM is ON. Please remove EXTSHM from your environment.");
4241    }
4242  } else {
4243    _extshm = 0;
4244  }
4245
4246  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4247  // Not tested, not supported.
4248  //
4249  // Note that it might be worth the trouble to test and to require it, if only to
4250  // get useful return codes for mprotect.
4251  //
4252  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4253  // exec() ? before loading the libjvm ? ....)
4254  p = ::getenv("XPG_SUS_ENV");
4255  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4256  if (p && strcmp(p, "ON") == 0) {
4257    _xpg_sus_mode = 1;
4258    trcVerbose("Unsupported setting: XPG_SUS_ENV=ON");
4259    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4260    // clobber address ranges. If we ever want to support that, we have to do some
4261    // testing first.
4262    guarantee(false, "XPG_SUS_ENV=ON not supported");
4263  } else {
4264    _xpg_sus_mode = 0;
4265  }
4266
4267  if (os::Aix::on_pase()) {
4268    p = ::getenv("QIBM_MULTI_THREADED");
4269    trcVerbose("QIBM_MULTI_THREADED=%s.", p ? p : "<unset>");
4270  }
4271
4272  p = ::getenv("LDR_CNTRL");
4273  trcVerbose("LDR_CNTRL=%s.", p ? p : "<unset>");
4274  if (os::Aix::on_pase() && os::Aix::os_version_short() == 0x0701) {
4275    if (p && ::strstr(p, "TEXTPSIZE")) {
4276      trcVerbose("*** WARNING - LDR_CNTRL contains TEXTPSIZE. "
4277        "you may experience hangs or crashes on OS/400 V7R1.");
4278    }
4279  }
4280
4281  p = ::getenv("AIXTHREAD_GUARDPAGES");
4282  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4283
4284} // end: os::Aix::scan_environment()
4285
4286// PASE: initialize the libo4 library (PASE porting library).
4287void os::Aix::initialize_libo4() {
4288  guarantee(os::Aix::on_pase(), "OS/400 only.");
4289  if (!libo4::init()) {
4290    trcVerbose("libo4 initialization failed.");
4291    assert(false, "libo4 initialization failed");
4292  } else {
4293    trcVerbose("libo4 initialized.");
4294  }
4295}
4296
4297// AIX: initialize the libperfstat library.
4298void os::Aix::initialize_libperfstat() {
4299  assert(os::Aix::on_aix(), "AIX only");
4300  if (!libperfstat::init()) {
4301    trcVerbose("libperfstat initialization failed.");
4302    assert(false, "libperfstat initialization failed");
4303  } else {
4304    trcVerbose("libperfstat initialized.");
4305  }
4306}
4307
4308/////////////////////////////////////////////////////////////////////////////
4309// thread stack
4310
4311// Function to query the current stack size using pthread_getthrds_np.
4312static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4313  // This only works when invoked on a pthread. As we agreed not to use
4314  // primordial threads anyway, I assert here.
4315  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4316
4317  // Information about this api can be found (a) in the pthread.h header and
4318  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4319  //
4320  // The use of this API to find out the current stack is kind of undefined.
4321  // But after a lot of tries and asking IBM about it, I concluded that it is safe
4322  // enough for cases where I let the pthread library create its stacks. For cases
4323  // where I create my own stack and pass it to pthread_create, it seems not to
4324  // work (the returned stack size in that case is 0).
4325
4326  pthread_t tid = pthread_self();
4327  struct __pthrdsinfo pinfo;
4328  char dummy[1]; // Just needed to satisfy pthread_getthrds_np.
4329  int dummy_size = sizeof(dummy);
4330
4331  memset(&pinfo, 0, sizeof(pinfo));
4332
4333  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4334                                     sizeof(pinfo), dummy, &dummy_size);
4335
4336  if (rc != 0) {
4337    assert0(false);
4338    trcVerbose("pthread_getthrds_np failed (%d)", rc);
4339    return false;
4340  }
4341  guarantee0(pinfo.__pi_stackend);
4342
4343  // The following may happen when invoking pthread_getthrds_np on a pthread
4344  // running on a user provided stack (when handing down a stack to pthread
4345  // create, see pthread_attr_setstackaddr).
4346  // Not sure what to do then.
4347
4348  guarantee0(pinfo.__pi_stacksize);
4349
4350  // Note: we get three values from pthread_getthrds_np:
4351  //       __pi_stackaddr, __pi_stacksize, __pi_stackend
4352  //
4353  // high addr    ---------------------
4354  //
4355  //    |         pthread internal data, like ~2K
4356  //    |
4357  //    |         ---------------------   __pi_stackend   (usually not page aligned, (xxxxF890))
4358  //    |
4359  //    |
4360  //    |
4361  //    |
4362  //    |
4363  //    |
4364  //    |          ---------------------   (__pi_stackend - __pi_stacksize)
4365  //    |
4366  //    |          padding to align the following AIX guard pages, if enabled.
4367  //    |
4368  //    V          ---------------------   __pi_stackaddr
4369  //
4370  // low addr      AIX guard pages, if enabled (AIXTHREAD_GUARDPAGES > 0)
4371  //
4372
4373  address stack_base = (address)(pinfo.__pi_stackend);
4374  address stack_low_addr = (address)align_ptr_up(pinfo.__pi_stackaddr,
4375    os::vm_page_size());
4376  size_t stack_size = stack_base - stack_low_addr;
4377
4378  if (p_stack_base) {
4379    *p_stack_base = stack_base;
4380  }
4381
4382  if (p_stack_size) {
4383    *p_stack_size = stack_size;
4384  }
4385
4386  return true;
4387}
4388
4389// Get the current stack base from the OS (actually, the pthread library).
4390address os::current_stack_base() {
4391  address p;
4392  query_stack_dimensions(&p, 0);
4393  return p;
4394}
4395
4396// Get the current stack size from the OS (actually, the pthread library).
4397size_t os::current_stack_size() {
4398  size_t s;
4399  query_stack_dimensions(0, &s);
4400  return s;
4401}
4402
4403// Refer to the comments in os_solaris.cpp park-unpark.
4404
4405// utility to compute the abstime argument to timedwait:
4406// millis is the relative timeout time
4407// abstime will be the absolute timeout time
4408// TODO: replace compute_abstime() with unpackTime()
4409
4410static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4411  if (millis < 0) millis = 0;
4412  struct timeval now;
4413  int status = gettimeofday(&now, NULL);
4414  assert(status == 0, "gettimeofday");
4415  jlong seconds = millis / 1000;
4416  millis %= 1000;
4417  if (seconds > 50000000) { // see man cond_timedwait(3T)
4418    seconds = 50000000;
4419  }
4420  abstime->tv_sec = now.tv_sec  + seconds;
4421  long       usec = now.tv_usec + millis * 1000;
4422  if (usec >= 1000000) {
4423    abstime->tv_sec += 1;
4424    usec -= 1000000;
4425  }
4426  abstime->tv_nsec = usec * 1000;
4427  return abstime;
4428}
4429
4430// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4431// Conceptually TryPark() should be equivalent to park(0).
4432
4433int os::PlatformEvent::TryPark() {
4434  for (;;) {
4435    const int v = _Event;
4436    guarantee ((v == 0) || (v == 1), "invariant");
4437    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4438  }
4439}
4440
4441void os::PlatformEvent::park() {       // AKA "down()"
4442  // Invariant: Only the thread associated with the Event/PlatformEvent
4443  // may call park().
4444  // TODO: assert that _Assoc != NULL or _Assoc == Self
4445  int v;
4446  for (;;) {
4447    v = _Event;
4448    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4449  }
4450  guarantee (v >= 0, "invariant");
4451  if (v == 0) {
4452    // Do this the hard way by blocking ...
4453    int status = pthread_mutex_lock(_mutex);
4454    assert_status(status == 0, status, "mutex_lock");
4455    guarantee (_nParked == 0, "invariant");
4456    ++ _nParked;
4457    while (_Event < 0) {
4458      status = pthread_cond_wait(_cond, _mutex);
4459      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_wait");
4460    }
4461    -- _nParked;
4462
4463    // In theory we could move the ST of 0 into _Event past the unlock(),
4464    // but then we'd need a MEMBAR after the ST.
4465    _Event = 0;
4466    status = pthread_mutex_unlock(_mutex);
4467    assert_status(status == 0, status, "mutex_unlock");
4468  }
4469  guarantee (_Event >= 0, "invariant");
4470}
4471
4472int os::PlatformEvent::park(jlong millis) {
4473  guarantee (_nParked == 0, "invariant");
4474
4475  int v;
4476  for (;;) {
4477    v = _Event;
4478    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4479  }
4480  guarantee (v >= 0, "invariant");
4481  if (v != 0) return OS_OK;
4482
4483  // We do this the hard way, by blocking the thread.
4484  // Consider enforcing a minimum timeout value.
4485  struct timespec abst;
4486  compute_abstime(&abst, millis);
4487
4488  int ret = OS_TIMEOUT;
4489  int status = pthread_mutex_lock(_mutex);
4490  assert_status(status == 0, status, "mutex_lock");
4491  guarantee (_nParked == 0, "invariant");
4492  ++_nParked;
4493
4494  // Object.wait(timo) will return because of
4495  // (a) notification
4496  // (b) timeout
4497  // (c) thread.interrupt
4498  //
4499  // Thread.interrupt and object.notify{All} both call Event::set.
4500  // That is, we treat thread.interrupt as a special case of notification.
4501  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4502  // We assume all ETIME returns are valid.
4503  //
4504  // TODO: properly differentiate simultaneous notify+interrupt.
4505  // In that case, we should propagate the notify to another waiter.
4506
4507  while (_Event < 0) {
4508    status = pthread_cond_timedwait(_cond, _mutex, &abst);
4509    assert_status(status == 0 || status == ETIMEDOUT,
4510                  status, "cond_timedwait");
4511    if (!FilterSpuriousWakeups) break;         // previous semantics
4512    if (status == ETIMEDOUT) break;
4513    // We consume and ignore EINTR and spurious wakeups.
4514  }
4515  --_nParked;
4516  if (_Event >= 0) {
4517     ret = OS_OK;
4518  }
4519  _Event = 0;
4520  status = pthread_mutex_unlock(_mutex);
4521  assert_status(status == 0, status, "mutex_unlock");
4522  assert (_nParked == 0, "invariant");
4523  return ret;
4524}
4525
4526void os::PlatformEvent::unpark() {
4527  int v, AnyWaiters;
4528  for (;;) {
4529    v = _Event;
4530    if (v > 0) {
4531      // The LD of _Event could have reordered or be satisfied
4532      // by a read-aside from this processor's write buffer.
4533      // To avoid problems execute a barrier and then
4534      // ratify the value.
4535      OrderAccess::fence();
4536      if (_Event == v) return;
4537      continue;
4538    }
4539    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4540  }
4541  if (v < 0) {
4542    // Wait for the thread associated with the event to vacate
4543    int status = pthread_mutex_lock(_mutex);
4544    assert_status(status == 0, status, "mutex_lock");
4545    AnyWaiters = _nParked;
4546
4547    if (AnyWaiters != 0) {
4548      // Note: we signal while still holding the mutex here; signaling
4549      // *after* dropping the lock would avoid a class of futile wakeups.
4550      status = pthread_cond_signal(_cond);
4551      assert_status(status == 0, status, "cond_signal");
4552    }
4553    // Mutex should be locked for pthread_cond_signal(_cond).
4554    status = pthread_mutex_unlock(_mutex);
4555    assert_status(status == 0, status, "mutex_unlock");
4556  }
4557
4558  // Note: signaling _after_ dropping the lock (for "immortal" Events) would
4559  // also be safe and avoids a common class of futile wakeups. In rare
4560  // circumstances it can cause a thread to return prematurely from
4561  // cond_{timed}wait(), but the spurious wakeup is benign and the victim will
4562  // simply re-test the condition and re-park itself.
4563}
4564
4565
4566// JSR166
4567// -------------------------------------------------------
4568
4569//
4570// The solaris and linux implementations of park/unpark are fairly
4571// conservative for now, but can be improved. They currently use a
4572  // mutex/condvar pair, plus a count.
4573// Park decrements count if > 0, else does a condvar wait. Unpark
4574// sets count to 1 and signals condvar. Only one thread ever waits
4575// on the condvar. Contention seen when trying to park implies that someone
4576// is unparking you, so don't wait. And spurious returns are fine, so there
4577// is no need to track notifications.
4578//
4579
4580#define MAX_SECS 100000000
4581//
4582// This code is common to linux and solaris and will be moved to a
4583// common place in dolphin.
4584//
4585// The passed in time value is either a relative time in nanoseconds
4586// or an absolute time in milliseconds. Either way it has to be unpacked
4587// into suitable seconds and nanoseconds components and stored in the
4588// given timespec structure.
4589  // Since the given time is a 64-bit value and the time_t used in the timespec
4590  // is only a signed 32-bit value (except on 64-bit Linux), we have to watch for
4591  // overflow if times far in the future are given. Further, on Solaris versions
4592// prior to 10 there is a restriction (see cond_timedwait) that the specified
4593// number of seconds, in abstime, is less than current_time + 100,000,000.
4594// As it will be 28 years before "now + 100000000" will overflow we can
4595// ignore overflow and just impose a hard-limit on seconds using the value
4596// of "now + 100,000,000". This places a limit on the timeout of about 3.17
4597// years from "now".
4598//
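// As a quick sanity check of the 3.17-year figure (assuming 365.25-day
// years): 100,000,000 s / (365.25 * 86,400 s/day) ~= 3.17 years.
//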
4599
4600static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4601  assert(time > 0, "unpackTime: time must be positive");
4602
4603  struct timeval now;
4604  int status = gettimeofday(&now, NULL);
4605  assert(status == 0, "gettimeofday");
4606
4607  time_t max_secs = now.tv_sec + MAX_SECS;
4608
4609  if (isAbsolute) {
4610    jlong secs = time / 1000;
4611    if (secs > max_secs) {
4612      absTime->tv_sec = max_secs;
4613    } else {
4615      absTime->tv_sec = secs;
4616    }
4617    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4618  } else {
4620    jlong secs = time / NANOSECS_PER_SEC;
4621    if (secs >= MAX_SECS) {
4622      absTime->tv_sec = max_secs;
4623      absTime->tv_nsec = 0;
4624    } else {
4626      absTime->tv_sec = now.tv_sec + secs;
4627      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec * 1000;
4628      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4629        absTime->tv_nsec -= NANOSECS_PER_SEC;
4630        ++absTime->tv_sec; // note: this must be <= max_secs
4631      }
4632    }
4633  }
4634  assert(absTime->tv_sec >= 0, "tv_sec < 0");
4635  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4636  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4637  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4638}
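
// Example (illustrative values only) of how Parker::park() below hands its
// time argument to unpackTime():
//
//   timespec abst;
//   unpackTime(&abst, false, 1000000);         // relative: now + 1 ms
//   unpackTime(&abst, true, 1498000000000LL);  // absolute: millis since epoch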
4639
4640void Parker::park(bool isAbsolute, jlong time) {
4641  // Optional fast-path check:
4642  // Return immediately if a permit is available.
4643  if (_counter > 0) {
4644    _counter = 0;
4645    OrderAccess::fence();
4646    return;
4647  }
4648
4649  Thread* thread = Thread::current();
4650  assert(thread->is_Java_thread(), "Must be JavaThread");
4651  JavaThread *jt = (JavaThread *)thread;
4652
4653  // Optional optimization -- avoid state transitions if there's an interrupt pending.
4654  // Check interrupt before trying to wait
4655  if (Thread::is_interrupted(thread, false)) {
4656    return;
4657  }
4658
4659  // Next, demultiplex/decode time arguments
4660  timespec absTime;
4661  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4662    return;
4663  }
4664  if (time > 0) {
4665    unpackTime(&absTime, isAbsolute, time);
4666  }
4667
4668  // Enter safepoint region
4669  // Beware of deadlocks such as 6317397.
4670  // The per-thread Parker:: mutex is a classic leaf-lock.
4671  // In particular a thread must never block on the Threads_lock while
4672  // holding the Parker:: mutex. If safepoints are pending, both the
4673  // ThreadBlockInVM() CTOR and DTOR may grab the Threads_lock.
4674  ThreadBlockInVM tbivm(jt);
4675
4676  // Don't wait if we cannot get the lock, since interference arises from
4677  // unblocking. Also, check for a pending interrupt before trying to wait.
4678  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4679    return;
4680  }
4681
4682  int status;
4683  if (_counter > 0) { // no wait needed
4684    _counter = 0;
4685    status = pthread_mutex_unlock(_mutex);
4686    assert (status == 0, "invariant");
4687    OrderAccess::fence();
4688    return;
4689  }
4690
4691#ifdef ASSERT
4692  // Don't catch signals while blocked; let the running threads have the signals.
4693  // (This allows a debugger to break into the running thread.)
4694  sigset_t oldsigs;
4695  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4696  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4697#endif
4698
4699  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4700  jt->set_suspend_equivalent();
4701  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4702
4703  if (time == 0) {
4704    status = pthread_cond_wait(_cond, _mutex);
4705  } else {
4706    status = pthread_cond_timedwait(_cond, _mutex, &absTime);
4707  }
4708  assert_status(status == 0 || status == EINTR ||
4709                status == ETIME || status == ETIMEDOUT,
4710                status, "cond_timedwait");
4711
4712#ifdef ASSERT
4713  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4714#endif
4715
4716  _counter = 0;
4717  status = pthread_mutex_unlock(_mutex);
4718  assert_status(status == 0, status, "invariant");
4719  // If externally suspended while waiting, re-suspend
4720  if (jt->handle_special_suspend_equivalent_condition()) {
4721    jt->java_suspend_self();
4722  }
4723
4724  OrderAccess::fence();
4725}
4726
4727void Parker::unpark() {
4728  int s, status;
4729  status = pthread_mutex_lock(_mutex);
4730  assert (status == 0, "invariant");
4731  s = _counter;
4732  _counter = 1;
4733  if (s < 1) {
4734    status = pthread_mutex_unlock(_mutex);
4735    assert (status == 0, "invariant");
4736    status = pthread_cond_signal(_cond);
4737    assert (status == 0, "invariant");
4738  } else {
4739    status = pthread_mutex_unlock(_mutex);
4740    assert (status == 0, "invariant");
4741  }
4742}
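
// Illustrative only: Parker::park/unpark back java.util.concurrent.locks.
// LockSupport. The sun.misc.Unsafe park/unpark entry points reach them
// roughly as
//
//   thread->parker()->park(isAbsolute != 0, time);  // from Unsafe_Park
//   thread->parker()->unpark();                     // from Unsafe_Unpark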
4743
4744extern char** environ;
4745
4746// Run the specified command in a separate process. Return its exit value,
4747// or -1 on failure (e.g. can't fork a new process).
4748// Unlike system(), this function can be called from a signal handler. It
4749// doesn't block SIGINT et al.
4750int os::fork_and_exec(char* cmd) {
4751  char* argv[4] = { (char*)"sh", (char*)"-c", cmd, NULL };
4752
4753  pid_t pid = fork();
4754
4755  if (pid < 0) {
4756    // fork failed
4757    return -1;
4758
4759  } else if (pid == 0) {
4760    // child process
4761
4762    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4763    execve("/usr/bin/sh", argv, environ);
4764
4765    // execve failed
4766    _exit(-1);
4767
4768  } else {
4769    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4770    // care about the actual exit code, for now.
4771
4772    int status;
4773
4774    // Wait for the child process to exit. This returns immediately if
4775    // the child has already exited.
4776    while (waitpid(pid, &status, 0) < 0) {
4777      switch (errno) {
4778        case ECHILD: return 0;
4779        case EINTR: break;
4780        default: return -1;
4781      }
4782    }
4783
4784    if (WIFEXITED(status)) {
4785      // The child exited normally; get its exit code.
4786      return WEXITSTATUS(status);
4787    } else if (WIFSIGNALED(status)) {
4788      // The child exited because of a signal.
4789      // The best value to return is 0x80 + signal number,
4790      // because that is what all Unix shells do, and because
4791      // it allows callers to distinguish between process exit and
4792      // process death by signal.
4793      return 0x80 + WTERMSIG(status);
4794    } else {
4795      // Unknown exit code; pass it through.
4796      return status;
4797    }
4798  }
4799  return -1;
4800}
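
// Usage sketch (illustrative; compare os::start_debugging() below, which
// does exactly this):
//
//   char cmd[64];
//   jio_snprintf(cmd, sizeof(cmd), "dbx -a %d", os::current_process_id());
//   int rc = os::fork_and_exec(cmd);  // child's exit value, or -1 if fork failed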
4801
4802// is_headless_jre()
4803//
4804// Test for the existence of xawt/libmawt.so or libawt_xawt.so
4805// in order to report if we are running in a headless jre.
4806//
4807// Since JDK8, xawt/libmawt.so has been moved into the same directory
4808// as libawt.so and renamed to libawt_xawt.so.
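//
// Example (illustrative path): for a VM loaded from
// <jre>/lib/ppc64/server/libjvm.so the two trailing path components are
// stripped, leaving <jre>/lib/ppc64, and we then probe
// <jre>/lib/ppc64/xawt/libmawt.so and <jre>/lib/ppc64/libawt_xawt.so.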
4809bool os::is_headless_jre() {
4810  struct stat statbuf;
4811  char buf[MAXPATHLEN];
4812  char libmawtpath[MAXPATHLEN];
4813  const char *xawtstr = "/xawt/libmawt.so";
4814  const char *new_xawtstr = "/libawt_xawt.so";
4815
4816  char *p;
4817
4818  // Get path to libjvm.so
4819  os::jvm_path(buf, sizeof(buf));
4820
4821  // Get rid of libjvm.so
4822  p = strrchr(buf, '/');
4823  if (p == NULL) return false;
4824  else *p = '\0';
4825
4826  // Get rid of client or server
4827  p = strrchr(buf, '/');
4828  if (p == NULL) return false;
4829  else *p = '\0';
4830
4831  // check xawt/libmawt.so
4832  strcpy(libmawtpath, buf);
4833  strcat(libmawtpath, xawtstr);
4834  if (::stat(libmawtpath, &statbuf) == 0) return false;
4835
4836  // check libawt_xawt.so
4837  strcpy(libmawtpath, buf);
4838  strcat(libmawtpath, new_xawtstr);
4839  if (::stat(libmawtpath, &statbuf) == 0) return false;
4840
4841  return true;
4842}
4843
4844// Get the default path to the core file
4845// Returns the length of the string
4846int os::get_core_path(char* buffer, size_t bufferSize) {
4847  const char* p = get_current_directory(buffer, bufferSize);
4848
4849  if (p == NULL) {
4850    assert(p != NULL, "failed to get current directory");
4851    return 0;
4852  }
4853
4854  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4855               p, current_process_id());
4856
4857  return strlen(buffer);
4858}
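
// Example (illustrative): for a VM with pid 1234 whose current directory is
// /tmp, buffer receives "/tmp/core or core.1234" and its length, 22, is
// returned.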
4859
4860#ifndef PRODUCT
4861void TestReserveMemorySpecial_test() {
4862  // No tests available for this platform
4863}
4864#endif
4865
4866bool os::start_debugging(char *buf, int buflen) {
4867  int len = (int)strlen(buf);
4868  char *p = &buf[len];
4869
4870  jio_snprintf(p, buflen - len,
4871                 "\n\n"
4872                 "Do you want to debug the problem?\n\n"
4873                 "To debug, run 'dbx -a %d'; then switch to thread tid " INTX_FORMAT ", k-tid " INTX_FORMAT "\n"
4874                 "Enter 'yes' to launch dbx automatically (PATH must include dbx)\n"
4875                 "Otherwise, press RETURN to abort...",
4876                 os::current_process_id(),
4877                 os::current_thread_id(), thread_self());
4878
4879  bool yes = os::message_box("Unexpected Error", buf);
4880
4881  if (yes) {
4882    // yes, user asked VM to launch debugger
4883    jio_snprintf(buf, buflen, "dbx -a %d", os::current_process_id());
4884
4885    os::fork_and_exec(buf);
4886    yes = false;
4887  }
4888  return yes;
4889}
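
// Illustrative flow: buf typically arrives here already containing a fatal
// error description (e.g. when ShowMessageBoxOnError is set); the prompt
// above is appended and, if the user answers "yes", "dbx -a <pid>" is
// launched via os::fork_and_exec().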
4890
4891static inline time_t get_mtime(const char* filename) {
4892  struct stat st;
4893  int ret = os::stat(filename, &st);
4894  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
4895  return st.st_mtime;
4896}
4897
4898int os::compare_file_modified_times(const char* file1, const char* file2) {
4899  time_t t1 = get_mtime(file1);
4900  time_t t2 = get_mtime(file2);
4901  return t1 - t2;
4902}
4903
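// Illustrative usage: the result is negative if file1 is older than file2,
// zero if the modification times are equal, and positive if file1 is newer,
// e.g. (src_path and dst_path are hypothetical names):
//
//   if (os::compare_file_modified_times(src_path, dst_path) > 0) {
//     // src_path was modified more recently than dst_path
//   }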