// os_aix.cpp, revision 8555:c30414cbbd88
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS documentation, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// If RUSAGE_THREAD for getrusage() has not been defined, do it here. The code calling
// getrusage() is prepared to handle the associated failure.
#ifndef RUSAGE_THREAD
#define RUSAGE_THREAD   (1)               /* only the calling thread */
#endif
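
// Illustrative sketch (not from the original source) of the guarded pattern
// this fallback define enables; os::elapsedVTime() below uses it the same way:
//
//   struct rusage usage;
//   if (::getrusage(RUSAGE_THREAD, &usage) != 0) {
//     // On kernels without per-thread rusage this fails (e.g. with EINVAL);
//     // callers fall back to a coarser, process-wide clock instead.
//   }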

// PPC port
static const uintx Use64KPagesThreshold       = 1*M;
static const uintx MaxExpectedDataSegmentSize = SIZE_4G*2;

// Add missing declarations (should be in procinfo.h but aren't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// Within this file, codeptr_t denotes a *real code pointer*: any function
// taking codeptr_t arguments assumes a real code pointer and won't handle
// function descriptors (e.g. getFuncName), whereas functions taking address
// arguments also deal with function descriptors
// (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
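
// Background sketch (illustrative, not original code): on AIX, a C function
// pointer such as &foo points to a function descriptor in the data segment -
// a small record holding { code entry, TOC, environment } - not to the code
// itself. The distinction this file maintains:
//
//   void (*fp)(void) = &some_function;   // an 'address': a function descriptor
//   codeptr_t pc = *(codeptr_t*)fp;      // first slot: the real code pointer
//
// resolve_function_descriptor_to_code_pointer() below performs this resolution
// with the module checks needed to do it safely.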

// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
#define PV_7 0x200000          /* Power PC 7 */
#define PV_7_Compat 0x208000   /* Power PC 7 */
#endif
#ifndef PV_8
#define PV_8 0x300000          /* Power PC 8 */
#define PV_8_Compat 0x308000   /* Power PC 8 */
#endif

#define trcVerbose(fmt, ...) { /* PPC port */  \
  if (Verbose) { \
    fprintf(stderr, fmt, ##__VA_ARGS__); \
    fputc('\n', stderr); fflush(stderr); \
  } \
}
#define trc(fmt, ...)        /* PPC port */

#define ERRBYE(s) { \
    trcVerbose(s); \
    return -1; \
}
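
// Usage sketch for the tracing helpers above (illustrative; probe_something()
// and its syscall are made up for the example):
//
//   static int probe_something() {
//     trcVerbose("probing something (pid %d)", getpid());
//     if (::some_syscall() == -1) {
//       ERRBYE("probe failed");  // traces the message, then returns -1
//     }
//     return 0;
//   }
//
// Note that ERRBYE only fits functions whose return type accepts -1.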

// Query dimensions of the stack of the calling thread.
static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// Returns true if p is a valid code pointer.
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// Macro to check a given stack pointer against given stack limits and to die if the test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// Macro to check the current stack pointer against given stack limits.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
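
// Usage sketch (this mirrors the actual calls in java_start() and
// os::create_attached_thread() further down):
//
//   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
//
// Both macros guarantee() on failure, i.e. an invalid stack pointer aborts
// the VM instead of returning an error.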

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;

// This describes the state of multipage support of the underlying
// OS. Note that this is of no interest to the outside world and
// therefore should not be defined in the AIX class.
//
// AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The
// latter two (16M "large" and 16G "huge" pages, respectively) require
// special setup and are normally not available.
//
// AIX supports multiple page sizes per process, for:
//  - Stack (of the primordial thread, so not relevant for us)
//  - Data - data, bss, heap, for us also pthread stacks
//  - Text - text code
//  - shared memory
//
// Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
// and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...).
//
// For shared memory, page size can be set dynamically via
// shmctl(). Different shared memory regions can have different page
// sizes.
//
// More information can be found at the IBM info center:
//   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
//
static struct {
  size_t pagesize;            // sysconf _SC_PAGESIZE (4K)
  size_t datapsize;           // default data page size (LDR_CNTRL DATAPSIZE)
  size_t shmpsize;            // default shared memory page size (LDR_CNTRL SHMPSIZE)
  size_t pthr_stack_pagesize; // stack page size of pthread threads
  size_t textpsize;           // default text page size (LDR_CNTRL TEXTPSIZE)
  bool can_use_64K_pages;     // True if we can alloc 64K pages dynamically with Sys V shm.
  bool can_use_16M_pages;     // True if we can alloc 16M pages dynamically with Sys V shm.
  int error;                  // Error describing if something went wrong at multipage init.
} g_multipage_support = {
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  (size_t) -1,
  false, false,
  0
};
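
// Example (illustrative sketch, not original code) of how the probed flags
// are consulted once query_multipage_support() has run:
//
//   size_t shm_page_size = SIZE_4K;
//   if (g_multipage_support.can_use_64K_pages) {
//     shm_page_size = SIZE_64K;  // request 64K pages via shmctl(SHM_PAGESIZE)
//   }
//
// On failure, 'error' holds one of the ERROR_MP_* codes defined above.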

// We must not accidentally allocate memory close to the BRK - even if
// that would work - because then we prevent the BRK segment from
// growing which may result in a malloc OOM even though there is
// enough memory. The problem only arises if we shmat() or mmap() at
// a specific wish address, e.g. to place the heap in a
// compressed-oops-friendly way.
static bool is_close_to_brk(address a) {
  address a1 = (address) sbrk(0);
  if (a >= a1 && a < (a1 + MaxExpectedDataSegmentSize)) {
    return true;
  }
  return false;
}
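
// Illustrative use (an assumption; mirrors how reservation code typically
// guards wish addresses): refuse a requested address that may collide with
// the growing data segment:
//
//   if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
//     return NULL;  // do not place memory where the brk may need to grow
//   }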

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

// Return true if the process runs with special privileges, i.e. the
// effective user or group id differs from the real id (setuid/setgid).

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x40000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i ++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
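
// Usage sketch (illustrative): releasing the physical backing of a large
// committed range, e.g. from an uncommit path:
//
//   if (!my_disclaim64(addr, size)) {
//     trcVerbose("disclaim failed for range %p - %p", addr, addr + size);
//   }
//
// disclaim() tells the kernel the page contents are no longer needed;
// DISCLAIM_ZEROMEM additionally makes the pages read back as zeros.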

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}
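
// Example (illustrative): query_multipage_support() below uses this to find
// the page size backing a pthread stack by probing a local variable:
//
//   int dummy = 0;
//   const size_t stack_page_size = os::Aix::query_pagesize(&dummy);
//
// vmgetinfo(VM_PAGE_INFO, ...) reports the page size actually backing the
// given address, which may differ from the 4K base page size.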

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // Get the number of online (logical) cpus instead of configured.
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // Retrieve total physical storage.
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K :     return "4K";
    case SIZE_64K:     return "64K";
    case SIZE_16M:     return "16M";
    case SIZE_16G:     return "16G";
    case (size_t)-1:   return "not set";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Probe OS for multipage support.
// Will fill the global g_multipage_support structure.
// Must be called before calling os::large_page_init().
static void query_multipage_support() {

  guarantee(g_multipage_support.pagesize == -1,
            "do not call twice");

  g_multipage_support.pagesize = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(g_multipage_support.pagesize == SIZE_4K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is defined either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  {
    void* p = ::malloc(SIZE_16M);
    g_multipage_support.datapsize = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // Query default shm page size (LDR_CNTRL SHMPSIZE).
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    g_multipage_support.shmpsize = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons so we may just as well guarantee it here.
  guarantee0(!os::Aix::is_primordial_thread());

  // Query pthread stack page size.
  {
    int dummy = 0;
    g_multipage_support.pthr_stack_pagesize = os::Aix::query_pagesize(&dummy);
  }

  // Query default text page size (LDR_CNTRL TEXTPSIZE).
  /* PPC port: so far unused.
  {
    address any_function =
      (address) resolve_function_descriptor_to_code_pointer((address)describe_pagesize);
    g_multipage_support.textpsize = os::Aix::query_pagesize(any_function);
  }
  */

  // Now probe for support of 64K pages and 16M pages.

  // Before OS/400 V6R1, there is no support for pages other than 4K.
  if (os::Aix::on_pase_V5R4_or_older()) {
    Unimplemented();
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      trc("vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
      trc("disabling multipage support.\n");
      g_multipage_support.error = ERROR_MP_VMGETINFO_FAILED;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    trcVerbose("vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
    for (int i = 0; i < num_psizes; i ++) {
      trcVerbose(" %s ", describe_pagesize(sizes[i]));
    }

    // Can we use 64K, 16M pages?
    for (int i = 0; i < num_psizes; i ++) {
      const size_t pagesize = sizes[i];
      if (pagesize != SIZE_64K && pagesize != SIZE_16M) {
        continue;
      }
      bool can_use = false;
      trcVerbose("Probing support for %s pages...", describe_pagesize(pagesize));
      const int shmid = ::shmget(IPC_PRIVATE, pagesize,
        IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee0(shmid != -1); // Should always work.
      // Try to set pagesize.
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = pagesize;
      if (::shmctl(shmid, SHM_PAGESIZE, &shm_buf) != 0) {
        const int en = errno;
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        // PPC port trcVerbose("shmctl(SHM_PAGESIZE) failed with %s",
        // PPC port  MiscUtils::describe_errno(en));
      } else {
        // Attach and double check page size.
        void* p = ::shmat(shmid, NULL, 0);
        ::shmctl(shmid, IPC_RMID, NULL); // As early as possible!
        guarantee0(p != (void*) -1); // Should always work.
        const size_t real_pagesize = os::Aix::query_pagesize(p);
        if (real_pagesize != pagesize) {
          trcVerbose("real page size (0x%llX) differs.", real_pagesize);
        } else {
          can_use = true;
        }
        ::shmdt(p);
      }
      trcVerbose("Can use: %s", (can_use ? "yes" : "no"));
      if (pagesize == SIZE_64K) {
        g_multipage_support.can_use_64K_pages = can_use;
      } else if (pagesize == SIZE_16M) {
        g_multipage_support.can_use_16M_pages = can_use;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  trcVerbose("base page size (sysconf _SC_PAGESIZE): %s\n",
      describe_pagesize(g_multipage_support.pagesize));
  trcVerbose("Data page size (C-Heap, bss, etc): %s\n",
      describe_pagesize(g_multipage_support.datapsize));
  trcVerbose("Text page size: %s\n",
      describe_pagesize(g_multipage_support.textpsize));
  trcVerbose("Thread stack page size (pthread): %s\n",
      describe_pagesize(g_multipage_support.pthr_stack_pagesize));
  trcVerbose("Default shared memory page size: %s\n",
      describe_pagesize(g_multipage_support.shmpsize));
  trcVerbose("Can use 64K pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
  trcVerbose("Can use 16M pages dynamically with shared memory: %s\n",
      (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
  trcVerbose("Multipage error details: %d\n",
      g_multipage_support.error);

  // sanity checks
  assert0(g_multipage_support.pagesize == SIZE_4K);
  assert0(g_multipage_support.datapsize == SIZE_4K || g_multipage_support.datapsize == SIZE_64K);
  // PPC port: so far unused. assert0(g_multipage_support.textpsize == SIZE_4K || g_multipage_support.textpsize == SIZE_64K);
  assert0(g_multipage_support.pthr_stack_pagesize == g_multipage_support.datapsize);
  assert0(g_multipage_support.shmpsize == SIZE_4K || g_multipage_support.shmpsize == SIZE_64K);

} // end query_multipage_support()

void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
    return true;
  } else {
    return false;
  }
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs).
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow the debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save the caller's signal mask before setting the VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset (&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset (&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy (pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_8:
    strcpy(pci->version, "Power PC 8");
    break;
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  case PV_8_Compat:
    strcpy(pci->version, "PV_8_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let pthread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret == 0) {
    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
  } else {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "pthread_continue_np failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

double os::elapsedVTime() {
  struct rusage usage;
  int retval = getrusage(RUSAGE_THREAD, &usage);
  if (retval == 0) {
    return usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec) / (1000.0 * 1000);
  } else {
    // better than nothing, but not much
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  seconds = jlong(time.tv_sec);
  nanos = jlong(time.tv_usec) * 1000;
}

// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (They probably only ever tested in C, not C++.)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core, void* siginfo, void* context) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {
  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // If you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in the osThread_linux.hpp file.

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
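
// Examples of the mappings produced above (illustrative):
//
//   dll_build_name(buf, n, "",         "jvm") -> "libjvm.so"
//   dll_build_name(buf, n, "/opt/lib", "jvm") -> "/opt/lib/libjvm.so"
//   dll_build_name(buf, n, "/a:/b",    "jvm") -> the first of "/a/libjvm.so",
//                                                "/b/libjvm.so" that exists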

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib == NULL) {
    lib = LoadedLibraries::find_for_data_address(addr);
  }
  return lib != NULL && strcmp(lib->get_shortname(), "libjvm.so") == 0;
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // It's a real code pointer.
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // Pointer into a data segment, potentially a function descriptor.
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // It's a function descriptor.
        return code_entry;
      }
    }
  }
  return NULL;
}
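
// Illustrative call (this mirrors the commented-out text page size probe in
// query_multipage_support() above):
//
//   address code = resolve_function_descriptor_to_code_pointer(
//                      (address)describe_pagesize);
//   // 'code' now points into a text segment, or is NULL if the input was
//   // neither a code pointer nor a function descriptor.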

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName, which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset, demangle);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      // Use snprintf so the name is truncated to the buffer size, including
      // the terminating NUL.
      snprintf(p_name, namelen, "%s", lib->get_shortname());
    }
    return 0;
  }

  trcVerbose("pc outside any module");

  return -1;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  // Buf is not optional, but offset is optional.
  assert(buf != NULL, "sanity check");
  buf[0] = '\0';

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, NULL, 0) == 0) {
    return true;
  }
  return false;
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

void* os::dll_lookup(void* handle, const char* name) {
  void* res = dlsym(handle, name);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1552
1553void os::print_os_info(outputStream* st) {
1554  st->print("OS:");
1555
1556  st->print("uname:");
1557  struct utsname name;
1558  uname(&name);
1559  st->print(name.sysname); st->print(" ");
1560  st->print(name.nodename); st->print(" ");
1561  st->print(name.release); st->print(" ");
1562  st->print(name.version); st->print(" ");
1563  st->print(name.machine);
1564  st->cr();
1565
1566  // rlimit
1567  st->print("rlimit:");
1568  struct rlimit rlim;
1569
1570  st->print(" STACK ");
1571  getrlimit(RLIMIT_STACK, &rlim);
1572  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1573  else st->print("%uk", rlim.rlim_cur >> 10);
1574
1575  st->print(", CORE ");
1576  getrlimit(RLIMIT_CORE, &rlim);
1577  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1578  else st->print("%uk", rlim.rlim_cur >> 10);
1579
1580  st->print(", NPROC ");
1581  st->print("%d", sysconf(_SC_CHILD_MAX));
1582
1583  st->print(", NOFILE ");
1584  getrlimit(RLIMIT_NOFILE, &rlim);
1585  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1586  else st->print("%d", rlim.rlim_cur);
1587
1588  st->print(", AS ");
1589  getrlimit(RLIMIT_AS, &rlim);
1590  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1591  else st->print("%uk", rlim.rlim_cur >> 10);
1592
1593  // Print limits on DATA, because it limits the C-heap.
1594  st->print(", DATA ");
1595  getrlimit(RLIMIT_DATA, &rlim);
1596  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1597  else st->print(UINT64_FORMAT "k", (uint64_t)rlim.rlim_cur >> 10);
1598  st->cr();
1599
1600  // load average
1601  st->print("load average:");
1602  double loadavg[3] = {-1.L, -1.L, -1.L};
1603  os::loadavg(loadavg, 3);
1604  st->print("%.2f %.2f %.2f", loadavg[0], loadavg[1], loadavg[2]);
1605  st->cr();
1606}
1607
1608void os::print_memory_info(outputStream* st) {
1609
1610  st->print_cr("Memory:");
1611
1612  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
1613  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
1614  st->print_cr("  Default shared memory page size:        %s",
1615    describe_pagesize(g_multipage_support.shmpsize));
1616  st->print_cr("  Can use 64K pages dynamically with shared meory:  %s",
1617    (g_multipage_support.can_use_64K_pages ? "yes" :"no"));
1618  st->print_cr("  Can use 16M pages dynamically with shared memory: %s",
1619    (g_multipage_support.can_use_16M_pages ? "yes" :"no"));
1620  if (g_multipage_error != 0) {
1621    st->print_cr("  multipage error: %d", g_multipage_error);
1622  }
1623
1624  // print out LDR_CNTRL because it affects the default page sizes
1625  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
1626  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
1627
1628  const char* const extshm = ::getenv("EXTSHM");
1629  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
1630  if (extshm && ((strcmp(extshm, "on") == 0) || (strcmp(extshm, "ON") == 0))) {
1631    st->print_cr("  *** Unsupported! Please remove EXTSHM from your environment! ***");
1632  }
1633
1634  // Call os::Aix::get_meminfo() to retrieve memory statistics.
1635  os::Aix::meminfo_t mi;
1636  if (os::Aix::get_meminfo(&mi)) {
1637    char buffer[256];
1638    if (os::Aix::on_aix()) {
1639      jio_snprintf(buffer, sizeof(buffer),
1640                   "  physical total : %llu\n"
1641                   "  physical free  : %llu\n"
1642                   "  swap total     : %llu\n"
1643                   "  swap free      : %llu\n",
1644                   mi.real_total,
1645                   mi.real_free,
1646                   mi.pgsp_total,
1647                   mi.pgsp_free);
1648    } else {
1649      Unimplemented();
1650    }
1651    st->print_raw(buffer);
1652  } else {
1653    st->print_cr("  (no more information available)");
1654  }
1655}
1656
1657void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1658  // cpu
1659  st->print("CPU:");
1660  st->print("total %d", os::processor_count());
1661  // It's not safe to query number of active processors after crash
1662  // st->print("(active %d)", os::active_processor_count());
1663  st->print(" %s", VM_Version::cpu_features());
1664  st->cr();
1665}
1666
1667void os::print_siginfo(outputStream* st, void* siginfo) {
1668  // Use common posix version.
1669  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
1670  st->cr();
1671}
1672
1673static void print_signal_handler(outputStream* st, int sig,
1674                                 char* buf, size_t buflen);
1675
1676void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1677  st->print_cr("Signal Handlers:");
1678  print_signal_handler(st, SIGSEGV, buf, buflen);
1679  print_signal_handler(st, SIGBUS , buf, buflen);
1680  print_signal_handler(st, SIGFPE , buf, buflen);
1681  print_signal_handler(st, SIGPIPE, buf, buflen);
1682  print_signal_handler(st, SIGXFSZ, buf, buflen);
1683  print_signal_handler(st, SIGILL , buf, buflen);
1684  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
1685  print_signal_handler(st, SR_signum, buf, buflen);
1686  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1687  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1688  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1689  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1690  print_signal_handler(st, SIGTRAP, buf, buflen);
1691  print_signal_handler(st, SIGDANGER, buf, buflen);
1692}
1693
1694static char saved_jvm_path[MAXPATHLEN] = {0};
1695
1696// Find the full path to the current module, libjvm.so.
1697void os::jvm_path(char *buf, jint buflen) {
1698  // Error checking.
1699  if (buflen < MAXPATHLEN) {
1700    assert(false, "must use a large-enough buffer");
1701    buf[0] = '\0';
1702    return;
1703  }
1704  // Lazily resolve the path to the current module.
1705  if (saved_jvm_path[0] != 0) {
1706    strcpy(buf, saved_jvm_path);
1707    return;
1708  }
1709
1710  Dl_info dlinfo;
1711  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1712  assert(ret != 0, "cannot locate libjvm");
1713  char* rp = realpath((char *)dlinfo.dli_fname, buf);
1714  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1715
1716  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1717  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1718}
1719
1720void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1721  // no prefix required, not even "_"
1722}
1723
1724void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1725  // no suffix required
1726}
1727
1728////////////////////////////////////////////////////////////////////////////////
1729// sun.misc.Signal support
1730
1731static volatile jint sigint_count = 0;
1732
1733static void
1734UserHandler(int sig, void *siginfo, void *context) {
1735  // 4511530 - sem_post is serialized and handled by the manager thread. When
1736  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1737  // don't want to flood the manager thread with sem_post requests.
1738  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1739    return;
1740
1741  // Ctrl-C is pressed during error reporting, likely because the error
1742  // handler fails to abort. Let VM die immediately.
1743  if (sig == SIGINT && is_error_reported()) {
1744    os::die();
1745  }
1746
1747  os::signal_notify(sig);
1748}
1749
1750void* os::user_handler() {
1751  return CAST_FROM_FN_PTR(void*, UserHandler);
1752}
1753
1754extern "C" {
1755  typedef void (*sa_handler_t)(int);
1756  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1757}
1758
1759void* os::signal(int signal_number, void* handler) {
1760  struct sigaction sigAct, oldSigAct;
1761
1762  sigfillset(&(sigAct.sa_mask));
1763
1764  // Do not block out synchronous signals in the signal handler.
1765  // Blocking synchronous signals only makes sense if you can really
1766  // be sure that those signals won't happen during signal handling,
1767  // when the blocking applies. Normal signal handlers are lean and
1768  // do not cause signals. But our signal handlers tend to be "risky"
1769  // - secondary SIGSEGV, SIGILL or SIGBUS signals may and do happen.
1770  // On AIX/PASE there was a case where a SIGSEGV happened, followed
1771  // by a SIGILL, which was blocked due to the signal mask. The process
1772  // just hung forever. Better to crash from a secondary signal than to hang.
1773  sigdelset(&(sigAct.sa_mask), SIGSEGV);
1774  sigdelset(&(sigAct.sa_mask), SIGBUS);
1775  sigdelset(&(sigAct.sa_mask), SIGILL);
1776  sigdelset(&(sigAct.sa_mask), SIGFPE);
1777  sigdelset(&(sigAct.sa_mask), SIGTRAP);
1778
1779  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1780
1781  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1782
1783  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1784    // -1 means registration failed
1785    return (void *)-1;
1786  }
1787
1788  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1789}
1790
1791void os::signal_raise(int signal_number) {
1792  ::raise(signal_number);
1793}
1794
1795//
1796// The following code was moved from os.cpp to make it
1797// platform specific, which it is by its very nature.
1798//
1799
1800// Will be modified when max signal is changed to be dynamic
1801int os::sigexitnum_pd() {
1802  return NSIG;
1803}
1804
1805// a counter for each possible signal value
1806static volatile jint pending_signals[NSIG+1] = { 0 };
1807
1808// POSIX handshaking semaphore (this code was inherited from the Linux port).
1809static sem_t sig_sem;
1810
1811void os::signal_init_pd() {
1812  // Initialize signal structures
1813  ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1814
1815  // Initialize signal semaphore
1816  int rc = ::sem_init(&sig_sem, 0, 0);
1817  guarantee(rc != -1, "sem_init failed");
1818}
1819
1820void os::signal_notify(int sig) {
1821  Atomic::inc(&pending_signals[sig]);
1822  ::sem_post(&sig_sem);
1823}
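// Note (summary of the mechanism above and below): pending_signals[] plus
// sig_sem form a simple producer/consumer handshake. UserHandler produces by
// calling signal_notify() (counter increment + sem_post); the signal
// dispatcher thread consumes in check_pending_signals(true), which blocks in
// sem_wait() and then claims one pending signal via cmpxchg.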
1824
1825static int check_pending_signals(bool wait) {
1826  Atomic::store(0, &sigint_count);
1827  for (;;) {
1828    for (int i = 0; i < NSIG + 1; i++) {
1829      jint n = pending_signals[i];
1830      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1831        return i;
1832      }
1833    }
1834    if (!wait) {
1835      return -1;
1836    }
1837    JavaThread *thread = JavaThread::current();
1838    ThreadBlockInVM tbivm(thread);
1839
1840    bool threadIsSuspended;
1841    do {
1842      thread->set_suspend_equivalent();
1843      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1844
1845      ::sem_wait(&sig_sem);
1846
1847      // were we externally suspended while we were waiting?
1848      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1849      if (threadIsSuspended) {
1850        //
1851        // The semaphore has been incremented, but while we were waiting
1852        // another thread suspended us. We don't want to continue running
1853        // while suspended because that would surprise the thread that
1854        // suspended us.
1855        //
1856        ::sem_post(&sig_sem);
1857
1858        thread->java_suspend_self();
1859      }
1860    } while (threadIsSuspended);
1861  }
1862}
1863
1864int os::signal_lookup() {
1865  return check_pending_signals(false);
1866}
1867
1868int os::signal_wait() {
1869  return check_pending_signals(true);
1870}
1871
1872////////////////////////////////////////////////////////////////////////////////
1873// Virtual Memory
1874
1875// We need to keep small simple bookkeeping for os::reserve_memory and friends.
1876
1877#define VMEM_MAPPED  1
1878#define VMEM_SHMATED 2
1879
1880struct vmembk_t {
1881  int type;         // VMEM_MAPPED (mmap) or VMEM_SHMATED (shmat)
1882  char* addr;
1883  size_t size;      // Real size, may be larger than usersize.
1884  size_t pagesize;  // page size of area
1885  vmembk_t* next;
1886
1887  bool contains_addr(char* p) const {
1888    return p >= addr && p < (addr + size);
1889  }
1890
1891  bool contains_range(char* p, size_t s) const {
1892    return contains_addr(p) && contains_addr(p + s - 1);
1893  }
1894
1895  void print_on(outputStream* os) const {
1896    os->print("[" PTR_FORMAT " - " PTR_FORMAT "] (" UINTX_FORMAT
1897      " bytes, %d %s pages), %s",
1898      addr, addr + size - 1, size, size / pagesize, describe_pagesize(pagesize),
1899      (type == VMEM_SHMATED ? "shmat" : "mmap")
1900    );
1901  }
1902
1903  // Check that range is a sub range of memory block (or equal to memory block);
1904  // also check that range is fully page aligned to the page size of the block.
1905  void assert_is_valid_subrange(char* p, size_t s) const {
1906    if (!contains_range(p, s)) {
1907      fprintf(stderr, "[" PTR_FORMAT " - " PTR_FORMAT "] is not a sub "
1908              "range of [" PTR_FORMAT " - " PTR_FORMAT "].\n",
1909              p, p + s - 1, addr, addr + size - 1);
1910      guarantee0(false);
1911    }
1912    if (!is_aligned_to(p, pagesize) || !is_aligned_to(p + s, pagesize)) {
1913      fprintf(stderr, "range [" PTR_FORMAT " - " PTR_FORMAT "] is not"
1914              " aligned to pagesize (%s)\n", p, p + s);
1915      guarantee0(false);
1916    }
1917  }
1918};
1919
1920static struct {
1921  vmembk_t* first;
1922  MiscUtils::CritSect cs;
1923} vmem;
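// The bookkeeping list is a simple LIFO singly-linked list (newest block
// first, see vmembk_add); every accessor below takes vmem.cs.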
1924
1925static void vmembk_add(char* addr, size_t size, size_t pagesize, int type) {
1926  vmembk_t* p = (vmembk_t*) ::malloc(sizeof(vmembk_t));
1927  assert0(p);
1928  if (p) {
1929    MiscUtils::AutoCritSect lck(&vmem.cs);
1930    p->addr = addr; p->size = size;
1931    p->pagesize = pagesize;
1932    p->type = type;
1933    p->next = vmem.first;
1934    vmem.first = p;
1935  }
1936}
1937
1938static vmembk_t* vmembk_find(char* addr) {
1939  MiscUtils::AutoCritSect lck(&vmem.cs);
1940  for (vmembk_t* p = vmem.first; p; p = p->next) {
1941    if (p->addr <= addr && (p->addr + p->size) > addr) {
1942      return p;
1943    }
1944  }
1945  return NULL;
1946}
1947
1948static void vmembk_remove(vmembk_t* p0) {
1949  MiscUtils::AutoCritSect lck(&vmem.cs);
1950  assert0(p0);
1951  assert0(vmem.first); // List should not be empty.
1952  for (vmembk_t** pp = &(vmem.first); *pp; pp = &((*pp)->next)) {
1953    if (*pp == p0) {
1954      *pp = p0->next;
1955      ::free(p0);
1956      return;
1957    }
1958  }
1959  assert0(false); // Not found?
1960}
1961
1962static void vmembk_print_on(outputStream* os) {
1963  MiscUtils::AutoCritSect lck(&vmem.cs);
1964  for (vmembk_t* vmi = vmem.first; vmi; vmi = vmi->next) {
1965    vmi->print_on(os);
1966    os->cr();
1967  }
1968}
1969
1970// Reserve and attach a section of System V memory.
1971// If <requested_addr> is not NULL, function will attempt to attach the memory at the given
1972// address. Failing that, it will attach the memory anywhere.
1973// If <requested_addr> is NULL, function will attach the memory anywhere.
1974//
1975// <alignment_hint> is being ignored by this function. It is very probable however that the
1976// alignment requirements are met anyway, because shmat() attaches at 256M boundaries.
1977// Should this not be enough, we can put more work into it.
1978static char* reserve_shmated_memory (
1979  size_t bytes,
1980  char* requested_addr,
1981  size_t alignment_hint) {
1982
1983  trcVerbose("reserve_shmated_memory " UINTX_FORMAT " bytes, wishaddress "
1984    PTR_FORMAT ", alignment_hint " UINTX_FORMAT "...",
1985    bytes, requested_addr, alignment_hint);
1986
1987  // Either give me wish address or wish alignment but not both.
1988  assert0(!(requested_addr != NULL && alignment_hint != 0));
1989
1990  // We must prevent anyone from attaching too close to the
1991  // BRK because that may cause malloc OOM.
1992  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
1993    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
1994      "Will attach anywhere.", requested_addr);
1995    // Act like the OS refused to attach there.
1996    requested_addr = NULL;
1997  }
1998
1999  // For old AS/400's (V5R4 and older) we should not even be here - System V shared memory is not
2000  // really supported (max size 4GB), so reserve_mmapped_memory should have been used instead.
2001  if (os::Aix::on_pase_V5R4_or_older()) {
2002    ShouldNotReachHere();
2003  }
2004
2005  // Align size of shm up to 64K to avoid errors if we later try to change the page size.
2006  const size_t size = align_size_up(bytes, SIZE_64K);
2007
2008  // Reserve the shared segment.
2009  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2010  if (shmid == -1) {
2011    trc("shmget(.., " UINTX_FORMAT ", ..) failed (errno: %d).", size, errno);
2012    return NULL;
2013  }
2014
2015  // Important note:
2016  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2017  // We must right after attaching it remove it from the system. System V shm segments are global and
2018  // survive the process.
2019  // So, from here on: Do not assert, do not return, until we have called shmctl(IPC_RMID) (A).
2020
2021  struct shmid_ds shmbuf;
2022  memset(&shmbuf, 0, sizeof(shmbuf));
2023  shmbuf.shm_pagesize = SIZE_64K;
2024  if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) != 0) {
2025    trcVerbose("Failed to set page size (need " UINTX_FORMAT " 64K pages) - shmctl failed with %d.",
2026               size / SIZE_64K, errno);
2027    // I want to know if this ever happens.
2028    assert(false, "failed to set page size for shmat");
2029  }
2030
2031  // Now attach the shared segment.
2032  // Note that I attach with SHM_RND - which means that the requested address is rounded down, if
2033  // needed, to the next lowest segment boundary. Otherwise the attach would fail if the address
2034  // were not a segment boundary.
2035  char* const addr = (char*) shmat(shmid, requested_addr, SHM_RND);
2036  const int errno_shmat = errno;
2037
2038  // (A) Right after shmat and before handing shmat errors delete the shm segment.
2039  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2040    trc("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2041    assert(false, "failed to remove shared memory segment!");
2042  }
2043
2044  // Handle shmat error. If we failed to attach, just return.
2045  if (addr == (char*)-1) {
2046    trcVerbose("Failed to attach segment at " PTR_FORMAT " (%d).", requested_addr, errno_shmat);
2047    return NULL;
2048  }
2049
2050  // Just for info: query the real page size. In case setting the page size did not
2051  // work (see above), the system may have given us something other than 4K (LDR_CNTRL).
2052  const size_t real_pagesize = os::Aix::query_pagesize(addr);
2053  if (real_pagesize != shmbuf.shm_pagesize) {
2054    trcVerbose("pagesize is, surprisingly, " SIZE_FORMAT ".", real_pagesize);
2055  }
2056
2057  if (addr) {
2058    trcVerbose("shm-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes, " UINTX_FORMAT " %s pages)",
2059      addr, addr + size - 1, size, size/real_pagesize, describe_pagesize(real_pagesize));
2060  } else {
2061    if (requested_addr != NULL) {
2062      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", size, requested_addr);
2063    } else {
2064      trcVerbose("failed to shm-allocate " UINTX_FORMAT " bytes at any address.", size);
2065    }
2066  }
2067
2068  // book-keeping
2069  vmembk_add(addr, size, real_pagesize, VMEM_SHMATED);
2070  assert0(is_aligned_to(addr, os::vm_page_size()));
2071
2072  return addr;
2073}
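// Background note on the shmget/shmat/shmctl(IPC_RMID) sequence above, as a
// minimal sketch of the underlying SysV idiom (illustrative only, error
// handling omitted):
//
//   int id  = shmget(IPC_PRIVATE, sz, IPC_CREAT | S_IRUSR | S_IWUSR);
//   void* p = shmat(id, NULL, 0);
//   shmctl(id, IPC_RMID, NULL); // id gone; segment dies with the last detach
//   ...
//   shmdt(p);                   // last detach actually frees the segment
//
// Removing the id right after attaching is what keeps a crashing VM from
// leaking global shm segments.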
2074
2075static bool release_shmated_memory(char* addr, size_t size) {
2076
2077  trcVerbose("release_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2078    addr, addr + size - 1);
2079
2080  bool rc = false;
2081
2082  // TODO: is there a way to verify shm size without doing bookkeeping?
2083  if (::shmdt(addr) != 0) {
2084    trcVerbose("error (%d).", errno);
2085  } else {
2086    trcVerbose("ok.");
2087    rc = true;
2088  }
2089  return rc;
2090}
2091
2092static bool uncommit_shmated_memory(char* addr, size_t size) {
2093  trcVerbose("uncommit_shmated_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2094    addr, addr + size - 1);
2095
2096  const bool rc = my_disclaim64(addr, size);
2097
2098  if (!rc) {
2099    trcVerbose("my_disclaim64(" PTR_FORMAT ", " UINTX_FORMAT ") failed.\n", addr, size);
2100    return false;
2101  }
2102  return true;
2103}
2104
2105// Reserve memory via mmap.
2106// If <requested_addr> is given, an attempt is made to attach at the given address.
2107// Failing that, memory is allocated at any address.
2108// If <alignment_hint> is given and <requested_addr> is NULL, an attempt is made to
2109// allocate at an address aligned with the given alignment. Failing that, memory
2110// is aligned anywhere.
2111static char* reserve_mmaped_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2112  trcVerbose("reserve_mmaped_memory " UINTX_FORMAT " bytes, wishaddress " PTR_FORMAT ", "
2113    "alignment_hint " UINTX_FORMAT "...",
2114    bytes, requested_addr, alignment_hint);
2115
2116  // If a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2117  if (requested_addr && !is_aligned_to(requested_addr, os::vm_page_size())) {
2118    trcVerbose("Wish address " PTR_FORMAT " not aligned to page boundary.", requested_addr);
2119    return NULL;
2120  }
2121
2122  // We must prevent anyone from attaching too close to the
2123  // BRK because that may cause malloc OOM.
2124  if (requested_addr != NULL && is_close_to_brk((address)requested_addr)) {
2125    trcVerbose("Wish address " PTR_FORMAT " is too close to the BRK segment. "
2126      "Will attach anywhere.", requested_addr);
2127    // Act like the OS refused to attach there.
2128    requested_addr = NULL;
2129  }
2130
2131  // Specify one or the other but not both.
2132  assert0(!(requested_addr != NULL && alignment_hint > 0));
2133
2134  // In 64K mode, we claim the global page size (os::vm_page_size())
2135  // is 64K. This is one of the few points where that illusion may
2136  // break, because mmap() will always return memory aligned to 4K. So
2137  // we must ensure we only ever return memory aligned to 64k.
2138  if (alignment_hint) {
2139    alignment_hint = lcm(alignment_hint, os::vm_page_size());
2140  } else {
2141    alignment_hint = os::vm_page_size();
2142  }
2143
2144  // Size shall always be a multiple of os::vm_page_size (esp. in 64K mode).
2145  const size_t size = align_size_up(bytes, os::vm_page_size());
2146
2147  // alignment: Allocate memory large enough to include an aligned range of the right size and
2148  // cut off the leading and trailing waste pages.
2149  assert0(alignment_hint != 0 && is_aligned_to(alignment_hint, os::vm_page_size())); // see above
2150  const size_t extra_size = size + alignment_hint;
2151
2152  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2153  // later use msync(MS_INVALIDATE) (see os::uncommit_memory).
2154  int flags = MAP_ANONYMOUS | MAP_SHARED;
2155
2156  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2157  // it means if wishaddress is given but MAP_FIXED is not set.
2158  //
2159  // Important! Behaviour differs depending on whether SPEC1170 mode is active or not.
2160  // SPEC1170 mode active: behaviour like POSIX, MAP_FIXED will clobber existing mappings.
2161  // SPEC1170 mode not active: behaviour, unlike POSIX, is that no existing mappings will
2162  // get clobbered.
2163  if (requested_addr != NULL) {
2164    if (!os::Aix::xpg_sus_mode()) {  // not SPEC1170 Behaviour
2165      flags |= MAP_FIXED;
2166    }
2167  }
2168
2169  char* addr = (char*)::mmap(requested_addr, extra_size,
2170      PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2171
2172  if (addr == MAP_FAILED) {
2173    trcVerbose("mmap(" PTR_FORMAT ", " UINTX_FORMAT ", ..) failed (%d)", requested_addr, extra_size, errno);
2174    return NULL;
2175  }
2176
2177  // Handle alignment.
2178  char* const addr_aligned = (char *)align_ptr_up(addr, alignment_hint);
2179  const size_t waste_pre = addr_aligned - addr;
2180  char* const addr_aligned_end = addr_aligned + size;
2181  const size_t waste_post = extra_size - waste_pre - size;
2182  if (waste_pre > 0) {
2183    ::munmap(addr, waste_pre);
2184  }
2185  if (waste_post > 0) {
2186    ::munmap(addr_aligned_end, waste_post);
2187  }
2188  addr = addr_aligned;
2189
2190  if (addr) {
2191    trcVerbose("mmap-allocated " PTR_FORMAT " .. " PTR_FORMAT " (" UINTX_FORMAT " bytes)",
2192      addr, addr + bytes, bytes);
2193  } else {
2194    if (requested_addr != NULL) {
2195      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at wish address " PTR_FORMAT ".", bytes, requested_addr);
2196    } else {
2197      trcVerbose("failed to mmap-allocate " UINTX_FORMAT " bytes at any address.", bytes);
2198    }
2199  }
2200
2201  // bookkeeping
2202  vmembk_add(addr, size, SIZE_4K, VMEM_MAPPED);
2203
2204  // Test alignment, see above.
2205  assert0(is_aligned_to(addr, os::vm_page_size()));
2206
2207  return addr;
2208}
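// Worked example for the over-allocate-and-trim alignment above (numbers
// illustrative): to get 1M aligned to 64K, mmap 1M + 64K, round the start up
// to the next 64K boundary (waste_pre bytes), and munmap the waste before and
// after the aligned range; what remains is exactly 1M, 64K aligned.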
2209
2210static bool release_mmaped_memory(char* addr, size_t size) {
2211  assert0(is_aligned_to(addr, os::vm_page_size()));
2212  assert0(is_aligned_to(size, os::vm_page_size()));
2213
2214  trcVerbose("release_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2215    addr, addr + size - 1);
2216  bool rc = false;
2217
2218  if (::munmap(addr, size) != 0) {
2219    trcVerbose("failed (%d)\n", errno);
2220    rc = false;
2221  } else {
2222    trcVerbose("ok.");
2223    rc = true;
2224  }
2225
2226  return rc;
2227}
2228
2229static bool uncommit_mmaped_memory(char* addr, size_t size) {
2230
2231  assert0(is_aligned_to(addr, os::vm_page_size()));
2232  assert0(is_aligned_to(size, os::vm_page_size()));
2233
2234  trcVerbose("uncommit_mmaped_memory [" PTR_FORMAT " - " PTR_FORMAT "].",
2235    addr, addr + size - 1);
2236  bool rc = false;
2237
2238  // Uncommit mmap memory with msync MS_INVALIDATE.
2239  if (::msync(addr, size, MS_INVALIDATE) != 0) {
2240    trcVerbose("failed (%d)\n", errno);
2241    rc = false;
2242  } else {
2243    trcVerbose("ok.");
2244    rc = true;
2245  }
2246
2247  return rc;
2248}
2249
2250// End: shared memory bookkeeping
2251////////////////////////////////////////////////////////////////////////////////////////////////////
2252
2253int os::vm_page_size() {
2254  // Seems redundant as all get out.
2255  assert(os::Aix::page_size() != -1, "must call os::init");
2256  return os::Aix::page_size();
2257}
2258
2259// Aix allocates memory by pages.
2260int os::vm_allocation_granularity() {
2261  assert(os::Aix::page_size() != -1, "must call os::init");
2262  return os::Aix::page_size();
2263}
2264
2265#ifdef PRODUCT
2266static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
2267                                    int err) {
2268  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
2269          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
2270          strerror(err), err);
2271}
2272#endif
2273
2274void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2275                                  const char* mesg) {
2276  assert(mesg != NULL, "mesg must be specified");
2277  if (!pd_commit_memory(addr, size, exec)) {
2278    // Add extra info in product mode for vm_exit_out_of_memory():
2279    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
2280    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
2281  }
2282}
2283
2284bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2285
2286  assert0(is_aligned_to(addr, os::vm_page_size()));
2287  assert0(is_aligned_to(size, os::vm_page_size()));
2288
2289  vmembk_t* const vmi = vmembk_find(addr);
2290  assert0(vmi);
2291  vmi->assert_is_valid_subrange(addr, size);
2292
2293  trcVerbose("commit_memory [" PTR_FORMAT " - " PTR_FORMAT "].", addr, addr + size - 1);
2294
2295  return true;
2296}
2297
2298bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
2299  return pd_commit_memory(addr, size, exec);
2300}
2301
2302void os::pd_commit_memory_or_exit(char* addr, size_t size,
2303                                  size_t alignment_hint, bool exec,
2304                                  const char* mesg) {
2305  // Alignment_hint is ignored on this OS.
2306  pd_commit_memory_or_exit(addr, size, exec, mesg);
2307}
2308
2309bool os::pd_uncommit_memory(char* addr, size_t size) {
2310  assert0(is_aligned_to(addr, os::vm_page_size()));
2311  assert0(is_aligned_to(size, os::vm_page_size()));
2312
2313  // Dynamically do different things for mmap/shmat.
2314  const vmembk_t* const vmi = vmembk_find(addr);
2315  assert0(vmi);
2316  vmi->assert_is_valid_subrange(addr, size);
2317
2318  if (vmi->type == VMEM_SHMATED) {
2319    return uncommit_shmated_memory(addr, size);
2320  } else {
2321    return uncommit_mmaped_memory(addr, size);
2322  }
2323}
2324
2325bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2326  // Do not call this; no need to commit stack pages on AIX.
2327  ShouldNotReachHere();
2328  return true;
2329}
2330
2331bool os::remove_stack_guard_pages(char* addr, size_t size) {
2332  // Do not call this; no need for stack guard pages on AIX.
2333  ShouldNotReachHere();
2334  return true;
2335}
2336
2337void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2338}
2339
2340void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2341}
2342
2343void os::numa_make_global(char *addr, size_t bytes) {
2344}
2345
2346void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2347}
2348
2349bool os::numa_topology_changed() {
2350  return false;
2351}
2352
2353size_t os::numa_get_groups_num() {
2354  return 1;
2355}
2356
2357int os::numa_get_group_id() {
2358  return 0;
2359}
2360
2361size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2362  if (size > 0) {
2363    ids[0] = 0;
2364    return 1;
2365  }
2366  return 0;
2367}
2368
2369bool os::get_page_info(char *start, page_info* info) {
2370  return false;
2371}
2372
2373char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2374  return end;
2375}
2376
2377// Reserves and attaches a shared memory segment.
2378// Will assert if a wish address is given and could not be obtained.
2379char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2380
2381  // All other Unices do a mmap(MAP_FIXED) if the addr is given,
2382  // thereby clobbering old mappings at that place. That is probably
2383  // not intended, never used, and almost certainly an error were it
2384  // ever to be used this way (to attach at a specified address
2385  // without clobbering old mappings, an alternate API exists:
2386  // os::attempt_reserve_memory_at()).
2387  // Instead of mimicking the dangerous coding of the other platforms, here I
2388  // just ignore the requested address (release) or assert (debug).
2389  assert0(requested_addr == NULL);
2390
2391  // Always round to os::vm_page_size(), which may be larger than 4K.
2392  bytes = align_size_up(bytes, os::vm_page_size());
2393  const size_t alignment_hint0 =
2394    alignment_hint ? align_size_up(alignment_hint, os::vm_page_size()) : 0;
2395
2396  // In 4K mode always use mmap.
2397  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2398  if (os::vm_page_size() == SIZE_4K) {
2399    return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2400  } else {
2401    if (bytes >= Use64KPagesThreshold) {
2402      return reserve_shmated_memory(bytes, requested_addr, alignment_hint);
2403    } else {
2404      return reserve_mmaped_memory(bytes, requested_addr, alignment_hint);
2405    }
2406  }
2407}
2408
2409bool os::pd_release_memory(char* addr, size_t size) {
2410
2411  // Dynamically do different things for mmap/shmat.
2412  vmembk_t* const vmi = vmembk_find(addr);
2413  assert0(vmi);
2414
2415  // Always round to os::vm_page_size(), which may be larger than 4K.
2416  size = align_size_up(size, os::vm_page_size());
2417  addr = (char *)align_ptr_up(addr, os::vm_page_size());
2418
2419  bool rc = false;
2420  bool remove_bookkeeping = false;
2421  if (vmi->type == VMEM_SHMATED) {
2422    // For shmatted memory, we do:
2423    // - If user wants to release the whole range, release the memory (shmdt).
2424    // - If user only wants to release a partial range, uncommit (disclaim) that
2425    //   range. That way, at least, we no longer use the memory (but it still
2426    //   occupies page table space).
2427    vmi->assert_is_valid_subrange(addr, size);
2428    if (addr == vmi->addr && size == vmi->size) {
2429      rc = release_shmated_memory(addr, size);
2430      remove_bookkeeping = true;
2431    } else {
2432      rc = uncommit_shmated_memory(addr, size);
2433    }
2434  } else {
2435    // User may unmap partial regions but region has to be fully contained.
2436#ifdef ASSERT
2437    vmi->assert_is_valid_subrange(addr, size);
2438#endif
2439    rc = release_mmaped_memory(addr, size);
2440    remove_bookkeeping = true;
2441  }
2442
2443  // update bookkeeping
2444  if (rc && remove_bookkeeping) {
2445    vmembk_remove(vmi);
2446  }
2447
2448  return rc;
2449}
2450
2451static bool checked_mprotect(char* addr, size_t size, int prot) {
2452
2453  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2454  // not tell me if protection failed when trying to protect an un-protectable range.
2455  //
2456  // This means if the memory was allocated using shmget/shmat, protection wont work
2457  // but mprotect will still return 0:
2458  //
2459  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2460
2461  bool rc = (::mprotect(addr, size, prot) == 0);
2462
2463  if (!rc) {
2464    const char* const s_errno = strerror(errno);
2465    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2466    return false;
2467  }
2468
2469  // mprotect success check
2470  //
2471  // Mprotect said it changed the protection but can I believe it?
2472  //
2473  // To be sure I need to check the protection afterwards. Try to
2474  // read from protected memory and check whether that causes a segfault.
2475  //
2476  if (!os::Aix::xpg_sus_mode()) {
2477
2478    if (CanUseSafeFetch32()) {
2479
2480      const bool read_protected =
2481        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2482         SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2483
2484      if (prot & PROT_READ) {
2485        rc = !read_protected;
2486      } else {
2487        rc = read_protected;
2488      }
2489    }
2490  }
2491  if (!rc) {
2492    assert(false, "mprotect failed.");
2493  }
2494  return rc;
2495}
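// Note on the SafeFetch32 double probe above: SafeFetch32 returns the
// supplied default value if the load faults. Probing twice with two different
// defaults distinguishes "address is unreadable" from "address happens to
// contain the default value".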
2496
2497// Set protections specified
2498bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2499  unsigned int p = 0;
2500  switch (prot) {
2501  case MEM_PROT_NONE: p = PROT_NONE; break;
2502  case MEM_PROT_READ: p = PROT_READ; break;
2503  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2504  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2505  default:
2506    ShouldNotReachHere();
2507  }
2508  // is_committed is unused.
2509  return checked_mprotect(addr, size, p);
2510}
2511
2512bool os::guard_memory(char* addr, size_t size) {
2513  return checked_mprotect(addr, size, PROT_NONE);
2514}
2515
2516bool os::unguard_memory(char* addr, size_t size) {
2517  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2518}
2519
2520// Large page support
2521
2522static size_t _large_page_size = 0;
2523
2524// Enable large page support if OS allows that.
2525void os::large_page_init() {
2526  return; // Nothing to do. See query_multipage_support and friends.
2527}
2528
2529char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2530  // "exec" is passed in but not used. Creating the shared image for
2531  // the code cache doesn't have an SHM_X executable permission to check.
2532  Unimplemented();
2533  return 0;
2534}
2535
2536bool os::release_memory_special(char* base, size_t bytes) {
2537  // Detaching the SHM segment will also delete it, see reserve_memory_special().
2538  Unimplemented();
2539  return false;
2540}
2541
2542size_t os::large_page_size() {
2543  return _large_page_size;
2544}
2545
2546bool os::can_commit_large_page_memory() {
2547  // Does not matter, we do not support huge pages.
2548  return false;
2549}
2550
2551bool os::can_execute_large_page_memory() {
2552  // Does not matter, we do not support huge pages.
2553  return false;
2554}
2555
2556// Reserve memory at an arbitrary address, only if that area is
2557// available (and not reserved for something else).
2558char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2559  char* addr = NULL;
2560
2561  // Always round to os::vm_page_size(), which may be larger than 4K.
2562  bytes = align_size_up(bytes, os::vm_page_size());
2563
2564  // In 4K mode always use mmap.
2565  // In 64K mode allocate small sizes with mmap, large ones with 64K shmatted.
2566  if (os::vm_page_size() == SIZE_4K) {
2567    return reserve_mmaped_memory(bytes, requested_addr, 0);
2568  } else {
2569    if (bytes >= Use64KPagesThreshold) {
2570      return reserve_shmated_memory(bytes, requested_addr, 0);
2571    } else {
2572      return reserve_mmaped_memory(bytes, requested_addr, 0);
2573    }
2574  }
2575
2576  return addr;
2577}
2578
2579size_t os::read(int fd, void *buf, unsigned int nBytes) {
2580  return ::read(fd, buf, nBytes);
2581}
2582
2583size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2584  return ::pread(fd, buf, nBytes, offset);
2585}
2586
2587void os::naked_short_sleep(jlong ms) {
2588  struct timespec req;
2589
2590  assert(ms < 1000, "Uninterruptible sleep; short time use only");
2591  req.tv_sec = 0;
2592  if (ms > 0) {
2593    req.tv_nsec = (ms % 1000) * 1000000;
2594  }
2595  else {
2596    req.tv_nsec = 1;
2597  }
2598
2599  nanosleep(&req, NULL);
2600
2601  return;
2602}
2603
2604// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2605void os::infinite_sleep() {
2606  while (true) {    // sleep forever ...
2607    ::sleep(100);   // ... 100 seconds at a time
2608  }
2609}
2610
2611// Used to convert frequent JVM_Yield() to nops
2612bool os::dont_yield() {
2613  return DontYieldALot;
2614}
2615
2616void os::naked_yield() {
2617  sched_yield();
2618}
2619
2620////////////////////////////////////////////////////////////////////////////////
2621// thread priority support
2622
2623// From AIX manpage to pthread_setschedparam
2624// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2625//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2626//
2627// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2628// range from 40 to 80, where 40 is the least favored priority and 80
2629// is the most favored."
2630//
2631// (Actually, I doubt this even has an impact on AIX, as we do kernel
2632// scheduling there; however, this still leaves iSeries.)
2633//
2634// We use the same values for AIX and PASE.
2635int os::java_to_os_priority[CriticalPriority + 1] = {
2636  54,             // 0 Entry should never be used
2637
2638  55,             // 1 MinPriority
2639  55,             // 2
2640  56,             // 3
2641
2642  56,             // 4
2643  57,             // 5 NormPriority
2644  57,             // 6
2645
2646  58,             // 7
2647  58,             // 8
2648  59,             // 9 NearMaxPriority
2649
2650  60,             // 10 MaxPriority
2651
2652  60              // 11 CriticalPriority
2653};
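// Note: the table maps Java priorities 1 (MinPriority) .. 10 (MaxPriority)
// roughly linearly into the middle of the favored SCHED_OTHER band 40..80
// quoted above (55..60), rather than using the band's extremes.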
2654
2655OSReturn os::set_native_priority(Thread* thread, int newpri) {
2656  if (!UseThreadPriorities) return OS_OK;
2657  pthread_t thr = thread->osthread()->pthread_id();
2658  int policy = SCHED_OTHER;
2659  struct sched_param param;
2660  param.sched_priority = newpri;
2661  int ret = pthread_setschedparam(thr, policy, &param);
2662
2663  if (ret != 0) {
2664    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
2665        (int)thr, newpri, ret, strerror(ret));
2666  }
2667  return (ret == 0) ? OS_OK : OS_ERR;
2668}
2669
2670OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2671  if (!UseThreadPriorities) {
2672    *priority_ptr = java_to_os_priority[NormPriority];
2673    return OS_OK;
2674  }
2675  pthread_t thr = thread->osthread()->pthread_id();
2676  int policy = SCHED_OTHER;
2677  struct sched_param param;
2678  int ret = pthread_getschedparam(thr, &policy, &param);
2679  *priority_ptr = param.sched_priority;
2680
2681  return (ret == 0) ? OS_OK : OS_ERR;
2682}
2683
2684// Hint to the underlying OS that a task switch would not be good.
2685// Void return because it's a hint and can fail.
2686void os::hint_no_preempt() {}
2687
2688////////////////////////////////////////////////////////////////////////////////
2689// suspend/resume support
2690
2691//  the low-level signal-based suspend/resume support is a remnant from the
2692//  old VM-suspension that used to be for java-suspension, safepoints etc,
2693//  within hotspot. Now there is a single use-case for this:
2694//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2695//      that runs in the watcher thread.
2696//  The remaining code is greatly simplified from the more general suspension
2697//  code that used to be used.
2698//
2699//  The protocol is quite simple:
2700//  - suspend:
2701//      - sends a signal to the target thread
2702//      - polls the suspend state of the osthread using a yield loop
2703//      - target thread signal handler (SR_handler) sets suspend state
2704//        and blocks in sigsuspend until continued
2705//  - resume:
2706//      - sets target osthread state to continue
2707//      - sends signal to end the sigsuspend loop in the SR_handler
2708//
2709//  Note that the SR_lock plays no role in this suspend/resume protocol.
2710//
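//  State transitions (see do_suspend/do_resume and SR_handler below):
//
//    suspend: SR_RUNNING   -> SR_SUSPEND_REQUEST -> SR_SUSPENDED
//             (request_suspend)    (SR_handler: sr.suspended())
//    resume:  SR_SUSPENDED -> SR_WAKEUP_REQUEST  -> SR_RUNNING
//             (request_wakeup)     (SR_handler loop: sr.running())
//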
2711
2712static void resume_clear_context(OSThread *osthread) {
2713  osthread->set_ucontext(NULL);
2714  osthread->set_siginfo(NULL);
2715}
2716
2717static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2718  osthread->set_ucontext(context);
2719  osthread->set_siginfo(siginfo);
2720}
2721
2722//
2723// Handler function invoked when a thread's execution is suspended or
2724// resumed. We have to be careful that only async-safe functions are
2725// called here (Note: most pthread functions are not async safe and
2726// should be avoided.)
2727//
2728// Note: sigwait() is a more natural fit than sigsuspend() from an
2729// interface point of view, but sigwait() prevents the signal handler
2730// from being run. libpthread would get very confused by not having
2731// its signal handlers run, which prevents sigwait()'s use with the
2732// mutex granting signal.
2733//
2734// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2735//
2736static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2737  // Save and restore errno to avoid confusing native code with EINTR
2738  // after sigsuspend.
2739  int old_errno = errno;
2740
2741  Thread* thread = Thread::current();
2742  OSThread* osthread = thread->osthread();
2743  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2744
2745  os::SuspendResume::State current = osthread->sr.state();
2746  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2747    suspend_save_context(osthread, siginfo, context);
2748
2749    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2750    os::SuspendResume::State state = osthread->sr.suspended();
2751    if (state == os::SuspendResume::SR_SUSPENDED) {
2752      sigset_t suspend_set;  // signals for sigsuspend()
2753
2754      // get current set of blocked signals and unblock resume signal
2755      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2756      sigdelset(&suspend_set, SR_signum);
2757
2758      // wait here until we are resumed
2759      while (1) {
2760        sigsuspend(&suspend_set);
2761
2762        os::SuspendResume::State result = osthread->sr.running();
2763        if (result == os::SuspendResume::SR_RUNNING) {
2764          break;
2765        }
2766      }
2767
2768    } else if (state == os::SuspendResume::SR_RUNNING) {
2769      // request was cancelled, continue
2770    } else {
2771      ShouldNotReachHere();
2772    }
2773
2774    resume_clear_context(osthread);
2775  } else if (current == os::SuspendResume::SR_RUNNING) {
2776    // request was cancelled, continue
2777  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2778    // ignore
2779  } else {
2780    ShouldNotReachHere();
2781  }
2782
2783  errno = old_errno;
2784}
2785
2786static int SR_initialize() {
2787  struct sigaction act;
2788  char *s;
2789  // Get signal number to use for suspend/resume
2790  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2791    int sig = ::strtol(s, 0, 10);
2792    if (sig > 0 && sig < NSIG) {
2793      SR_signum = sig;
2794    }
2795  }
2796
2797  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2798        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2799
2800  sigemptyset(&SR_sigset);
2801  sigaddset(&SR_sigset, SR_signum);
2802
2803  // Set up signal handler for suspend/resume.
2804  act.sa_flags = SA_RESTART|SA_SIGINFO;
2805  act.sa_sigaction = (sa_sigaction_t)SR_handler;
2806
2807  // SR_signum is blocked by default.
2808  // 4528190 - We also need to block the pthread restart signal (32 on all
2809  // supported Linux platforms; this note is inherited from the Linux port,
2810  // where LinuxThreads needed to block that signal for all threads). So we
2811  // don't have to use a hard-coded signal number when setting up the mask.
2812  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
2813
2814  if (sigaction(SR_signum, &act, 0) == -1) {
2815    return -1;
2816  }
2817
2818  // Save signal flag
2819  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
2820  return 0;
2821}
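// Example (illustrative): the suspend/resume signal can be overridden from
// the environment before launching the VM, e.g.
//
//   export _JAVA_SR_SIGNUM=39
//
// subject to the range check and the assert above (greater than
// SIGSEGV/SIGBUS, less than NSIG).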
2822
2823static int SR_finalize() {
2824  return 0;
2825}
2826
2827static int sr_notify(OSThread* osthread) {
2828  int status = pthread_kill(osthread->pthread_id(), SR_signum);
2829  assert_status(status == 0, status, "pthread_kill");
2830  return status;
2831}
2832
2833// "Randomly" selected value for how long we want to spin
2834// before bailing out on suspending a thread, also how often
2835// we send a signal to a thread we want to resume
2836static const int RANDOMLY_LARGE_INTEGER = 1000000;
2837static const int RANDOMLY_LARGE_INTEGER2 = 100;
2838
2839// returns true on success and false on error - really an error is fatal
2840// but this seems the normal response to library errors
2841static bool do_suspend(OSThread* osthread) {
2842  assert(osthread->sr.is_running(), "thread should be running");
2843  // mark as suspended and send signal
2844
2845  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
2846    // failed to switch, state wasn't running?
2847    ShouldNotReachHere();
2848    return false;
2849  }
2850
2851  if (sr_notify(osthread) != 0) {
2852    // try to cancel, switch to running
2853
2854    os::SuspendResume::State result = osthread->sr.cancel_suspend();
2855    if (result == os::SuspendResume::SR_RUNNING) {
2856      // cancelled
2857      return false;
2858    } else if (result == os::SuspendResume::SR_SUSPENDED) {
2859      // somehow managed to suspend
2860      return true;
2861    } else {
2862      ShouldNotReachHere();
2863      return false;
2864    }
2865  }
2866
2867  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
2868
2869  for (int n = 0; !osthread->sr.is_suspended(); n++) {
2870    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
2871      os::naked_yield();
2872    }
2873
2874    // timeout, try to cancel the request
2875    if (n >= RANDOMLY_LARGE_INTEGER) {
2876      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
2877      if (cancelled == os::SuspendResume::SR_RUNNING) {
2878        return false;
2879      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
2880        return true;
2881      } else {
2882        ShouldNotReachHere();
2883        return false;
2884      }
2885    }
2886  }
2887
2888  guarantee(osthread->sr.is_suspended(), "Must be suspended");
2889  return true;
2890}
2891
2892static void do_resume(OSThread* osthread) {
2893  //assert(osthread->sr.is_suspended(), "thread should be suspended");
2894
2895  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
2896    // failed to switch to WAKEUP_REQUEST
2897    ShouldNotReachHere();
2898    return;
2899  }
2900
2901  while (!osthread->sr.is_running()) {
2902    if (sr_notify(osthread) == 0) {
2903      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
2904        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
2905          os::naked_yield();
2906        }
2907      }
2908    } else {
2909      ShouldNotReachHere();
2910    }
2911  }
2912
2913  guarantee(osthread->sr.is_running(), "Must be running!");
2914}
2915
2916///////////////////////////////////////////////////////////////////////////////////
2917// signal handling (except suspend/resume)
2918
2919// This routine may be used by user applications as a "hook" to catch signals.
2920// The user-defined signal handler must pass unrecognized signals to this
2921// routine, and if it returns true (non-zero), then the signal handler must
2922// return immediately. If the flag "abort_if_unrecognized" is true, then this
2923// routine will never return false (zero), but instead will execute a VM panic
2924// routine to kill the process.
2925//
2926// If this routine returns false, it is OK to call it again. This allows
2927// the user-defined signal handler to perform checks either before or after
2928// the VM performs its own checks. Naturally, the user code would be making
2929// a serious error if it tried to handle an exception (such as a null check
2930// or breakpoint) that the VM was generating for its own correct operation.
2931//
2932// This routine may recognize any of the following kinds of signals:
2933//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
2934// It should be consulted by handlers for any of those signals.
2935//
2936// The caller of this routine must pass in the three arguments supplied
2937// to the function referred to in the "sa_sigaction" (not the "sa_handler")
2938// field of the structure passed to sigaction(). This routine assumes that
2939// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
2940//
2941// Note that the VM will print warnings if it detects conflicting signal
2942// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
2943//
2944extern "C" JNIEXPORT int
2945JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
2946
2947// Set thread signal mask (for some reason on AIX sigthreadmask() seems
2948// to be the thing to call; documentation is not terribly clear about whether
2949// pthread_sigmask also works, and if it does, whether it does the same).
2950bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
2951  const int rc = ::pthread_sigmask(how, set, oset);
2952  // Return value semantics differ slightly in the error case:
2953  // pthread_sigmask returns the error number, sigthreadmask returns -1 and sets
2954  // the global errno (so pthread_sigmask is more thread-safe for error handling).
2955  // But success is always 0.
2956  return rc == 0;
2957}
2958
2959// Function to unblock all signals which are, according
2960// to POSIX, typical program error signals. If they are raised while blocked,
2961// they typically will bring down the process immediately.
2962bool unblock_program_error_signals() {
2963  sigset_t set;
2964  ::sigemptyset(&set);
2965  ::sigaddset(&set, SIGILL);
2966  ::sigaddset(&set, SIGBUS);
2967  ::sigaddset(&set, SIGFPE);
2968  ::sigaddset(&set, SIGSEGV);
2969  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
2970}
2971
2972// Renamed from 'signalHandler' to avoid collision with other shared libs.
2973void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
2974  assert(info != NULL && uc != NULL, "it must be old kernel");
2975
2976  // Never leave program error signals blocked;
2977  // on all our platforms they would bring down the process immediately when
2978  // getting raised while being blocked.
2979  unblock_program_error_signals();
2980
2981  JVM_handle_aix_signal(sig, info, uc, true);
2982}
2983
2984// This boolean allows users to forward their own non-matching signals
2985// to JVM_handle_aix_signal, harmlessly.
2986bool os::Aix::signal_handlers_are_installed = false;
2987
2988// For signal-chaining
2989struct sigaction os::Aix::sigact[MAXSIGNUM];
2990unsigned int os::Aix::sigs = 0;
2991bool os::Aix::libjsig_is_loaded = false;
2992typedef struct sigaction *(*get_signal_t)(int);
2993get_signal_t os::Aix::get_signal_action = NULL;
2994
2995struct sigaction* os::Aix::get_chained_signal_action(int sig) {
2996  struct sigaction *actp = NULL;
2997
2998  if (libjsig_is_loaded) {
2999    // Retrieve the old signal handler from libjsig
3000    actp = (*get_signal_action)(sig);
3001  }
3002  if (actp == NULL) {
3003    // Retrieve the preinstalled signal handler from jvm
3004    actp = get_preinstalled_handler(sig);
3005  }
3006
3007  return actp;
3008}
3009
3010static bool call_chained_handler(struct sigaction *actp, int sig,
3011                                 siginfo_t *siginfo, void *context) {
3012  // Call the old signal handler
3013  if (actp->sa_handler == SIG_DFL) {
3014    // It's more reasonable to let jvm treat it as an unexpected exception
3015    // instead of taking the default action.
3016    return false;
3017  } else if (actp->sa_handler != SIG_IGN) {
3018    if ((actp->sa_flags & SA_NODEFER) == 0) {
3019      // automatically block the signal
3020      sigaddset(&(actp->sa_mask), sig);
3021    }
3022
3023    sa_handler_t hand = NULL;
3024    sa_sigaction_t sa = NULL;
3025    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3026    // retrieve the chained handler
3027    if (siginfo_flag_set) {
3028      sa = actp->sa_sigaction;
3029    } else {
3030      hand = actp->sa_handler;
3031    }
3032
3033    if ((actp->sa_flags & SA_RESETHAND) != 0) {
3034      actp->sa_handler = SIG_DFL;
3035    }
3036
3037    // try to honor the signal mask
3038    sigset_t oset;
3039    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3040
3041    // call into the chained handler
3042    if (siginfo_flag_set) {
3043      (*sa)(sig, siginfo, context);
3044    } else {
3045      (*hand)(sig);
3046    }
3047
3048    // restore the signal mask
3049    pthread_sigmask(SIG_SETMASK, &oset, 0);
3050  }
3051  // Tell jvm's signal handler the signal is taken care of.
3052  return true;
3053}
3054
3055bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3056  bool chained = false;
3057  // signal-chaining
3058  if (UseSignalChaining) {
3059    struct sigaction *actp = get_chained_signal_action(sig);
3060    if (actp != NULL) {
3061      chained = call_chained_handler(actp, sig, siginfo, context);
3062    }
3063  }
3064  return chained;
3065}
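// Dispatch order for a chained signal (summary of the code above): the VM's
// javaSignalHandler runs first; JVM_handle_aix_signal may then call
// os::Aix::chained_handler(), which looks up the previous handler - via
// libjsig if loaded, else the handler saved in sigact[] - and invokes it with
// the original mask and flags honored as far as possible.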
3066
3067struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3068  if ((((unsigned int)1 << sig) & sigs) != 0) {
3069    return &sigact[sig];
3070  }
3071  return NULL;
3072}
3073
3074void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3075  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3076  sigact[sig] = oldAct;
3077  sigs |= (unsigned int)1 << sig;
3078}
3079
3080// for diagnostic
3081int os::Aix::sigflags[MAXSIGNUM];
3082
3083int os::Aix::get_our_sigflags(int sig) {
3084  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3085  return sigflags[sig];
3086}
3087
3088void os::Aix::set_our_sigflags(int sig, int flags) {
3089  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3090  sigflags[sig] = flags;
3091}
3092
3093void os::Aix::set_signal_handler(int sig, bool set_installed) {
3094  // Check for overwrite.
3095  struct sigaction oldAct;
3096  sigaction(sig, (struct sigaction*)NULL, &oldAct);
3097
3098  void* oldhand = oldAct.sa_sigaction
3099    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3100    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3101  // Renamed 'signalHandler' to avoid collision with other shared libs.
3102  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3103      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3104      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3105    if (AllowUserSignalHandlers || !set_installed) {
3106      // Do not overwrite; user takes responsibility to forward to us.
3107      return;
3108    } else if (UseSignalChaining) {
3109      // save the old handler in jvm
3110      save_preinstalled_handler(sig, oldAct);
3111      // libjsig also interposes the sigaction() call below and saves the
3112      // old sigaction on its own.
3113    } else {
3114      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3115                    "%#lx for signal %d.", (long)oldhand, sig));
3116    }
3117  }
3118
3119  struct sigaction sigAct;
3120  sigfillset(&(sigAct.sa_mask));
3121  if (!set_installed) {
3122    sigAct.sa_handler = SIG_DFL;
3123    sigAct.sa_flags = SA_RESTART;
3124  } else {
3125    // Renamed 'signalHandler' to avoid collision with other shared libs.
3126    sigAct.sa_sigaction = javaSignalHandler;
3127    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3128  }
3129  // Save the flags we set ourselves (for diagnostics; see get_our_sigflags()).
3130  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3131  sigflags[sig] = sigAct.sa_flags;
3132
3133  int ret = sigaction(sig, &sigAct, &oldAct);
3134  assert(ret == 0, "check");
3135
3136  void* oldhand2 = oldAct.sa_sigaction
3137                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3138                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3139  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3140}
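
// A minimal sketch of the SA_SIGINFO convention that set_signal_handler()
// and call_chained_handler() rely on: which member of struct sigaction
// holds the active handler depends solely on SA_SIGINFO (on many platforms
// the two members share storage). Illustration only, not part of the build;
// the helper name is hypothetical:
#if 0
static void* active_handler_of(int sig) {
  struct sigaction act;
  sigaction(sig, NULL, &act);
  return (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(void*, act.sa_sigaction)  // three-argument form
    : CAST_FROM_FN_PTR(void*, act.sa_handler);   // classic one-argument form
}
#endif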
3141
3142// install signal handlers for signals that HotSpot needs to
3143// handle in order to support Java-level exception handling.
3144void os::Aix::install_signal_handlers() {
3145  if (!signal_handlers_are_installed) {
3146    signal_handlers_are_installed = true;
3147
3148    // signal-chaining
3149    typedef void (*signal_setting_t)();
3150    signal_setting_t begin_signal_setting = NULL;
3151    signal_setting_t end_signal_setting = NULL;
3152    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3153                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3154    if (begin_signal_setting != NULL) {
3155      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3156                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3157      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3158                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3159      libjsig_is_loaded = true;
3160      assert(UseSignalChaining, "should enable signal-chaining");
3161    }
3162    if (libjsig_is_loaded) {
3163      // Tell libjsig that the jvm is setting signal handlers.
3164      (*begin_signal_setting)();
3165    }
3166
3167    set_signal_handler(SIGSEGV, true);
3168    set_signal_handler(SIGPIPE, true);
3169    set_signal_handler(SIGBUS, true);
3170    set_signal_handler(SIGILL, true);
3171    set_signal_handler(SIGFPE, true);
3172    set_signal_handler(SIGTRAP, true);
3173    set_signal_handler(SIGXFSZ, true);
3174    set_signal_handler(SIGDANGER, true);
3175
3176    if (libjsig_is_loaded) {
3177      // Tell libjsig that the jvm has finished setting signal handlers.
3178      (*end_signal_setting)();
3179    }
3180
3181  // We don't activate the signal checker if libjsig is in place; we trust ourselves.
3182  // And if AllowUserSignalHandlers is set, all bets are off anyway.
3183    // Log that signal checking is off only if -verbose:jni is specified.
3184    if (CheckJNICalls) {
3185      if (libjsig_is_loaded) {
3186        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3187        check_signals = false;
3188      }
3189      if (AllowUserSignalHandlers) {
3190        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3191        check_signals = false;
3192      }
3193      // Need to initialize check_signal_done.
3194      ::sigemptyset(&check_signal_done);
3195    }
3196  }
3197}
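
// Rough, hypothetical sketch of the other side of the protocol above (this
// is not the actual libjsig source): an interposing library exports the
// JVM_begin/end_signal_setting brackets and, while they are open, records
// the VM's sigaction() calls instead of treating them as user handlers.
#if 0
static volatile int jvm_installing = 0;
extern "C" void JVM_begin_signal_setting() { jvm_installing = 1; }
extern "C" void JVM_end_signal_setting()   { jvm_installing = 0; }
// An interposed sigaction() would consult jvm_installing to decide whether
// to record the new handler as the VM's or to save it as a user handler
// for later chaining.
#endif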
3198
3199static const char* get_signal_handler_name(address handler,
3200                                           char* buf, int buflen) {
3201  int offset;
3202  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3203  if (found) {
3204    // skip directory names
3205    const char *p1, *p2;
3206    p1 = buf;
3207    size_t len = strlen(os::file_separator());
3208    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3209    // The way os::dll_address_to_library_name is implemented on AIX
3210    // right now, it always returns -1 for the offset, which is not
3211    // terribly informative.
3212    // Will fix that. For now, omit the offset.
3213    jio_snprintf(buf, buflen, "%s", p1);
3214  } else {
3215    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3216  }
3217  return buf;
3218}
3219
3220static void print_signal_handler(outputStream* st, int sig,
3221                                 char* buf, size_t buflen) {
3222  struct sigaction sa;
3223  sigaction(sig, NULL, &sa);
3224
3225  st->print("%s: ", os::exception_name(sig, buf, buflen));
3226
3227  address handler = (sa.sa_flags & SA_SIGINFO)
3228    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3229    : CAST_FROM_FN_PTR(address, sa.sa_handler);
3230
3231  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3232    st->print("SIG_DFL");
3233  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3234    st->print("SIG_IGN");
3235  } else {
3236    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3237  }
3238
3239  // Print readable mask.
3240  st->print(", sa_mask[0]=");
3241  os::Posix::print_signal_set_short(st, &sa.sa_mask);
3242
3243  address rh = VMError::get_resetted_sighandler(sig);
3244  // Maybe the handler was reset by VMError?
3245  if (rh != NULL) {
3246    handler = rh;
3247    sa.sa_flags = VMError::get_resetted_sigflags(sig);
3248  }
3249
3250  // Print textual representation of sa_flags.
3251  st->print(", sa_flags=");
3252  os::Posix::print_sa_flags(st, sa.sa_flags);
3253
3254  // Check: is it our handler?
3255  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3256      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3257    // It is our signal handler.
3258    // Check the flags; the system may have changed the ones we installed.
3259    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3260      st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
3261                os::Aix::get_our_sigflags(sig));
3262    }
3263  }
3264  st->cr();
3265}
3266
3267#define DO_SIGNAL_CHECK(sig) \
3268  if (!sigismember(&check_signal_done, sig)) \
3269    os::Aix::check_signal_handler(sig)
3270
3271// This method is a periodic task to check for misbehaving JNI applications
3272// under CheckJNI; we can add any other periodic checks here.
3273
3274void os::run_periodic_checks() {
3275
3276  if (check_signals == false) return;
3277
3278  // SEGV and BUS, if overridden, could potentially prevent
3279  // generation of hs*.log in the event of a crash; debugging
3280  // such a case can be very challenging, so we absolutely
3281  // check the following for good measure:
3282  DO_SIGNAL_CHECK(SIGSEGV);
3283  DO_SIGNAL_CHECK(SIGILL);
3284  DO_SIGNAL_CHECK(SIGFPE);
3285  DO_SIGNAL_CHECK(SIGBUS);
3286  DO_SIGNAL_CHECK(SIGPIPE);
3287  DO_SIGNAL_CHECK(SIGXFSZ);
3288  if (UseSIGTRAP) {
3289    DO_SIGNAL_CHECK(SIGTRAP);
3290  }
3291  DO_SIGNAL_CHECK(SIGDANGER);
3292
3293  // ReduceSignalUsage allows the user to override these handlers;
3294  // see the comments at the very top and in jvm_solaris.h.
3295  if (!ReduceSignalUsage) {
3296    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3297    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3298    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3299    DO_SIGNAL_CHECK(BREAK_SIGNAL);
3300  }
3301
3302  DO_SIGNAL_CHECK(SR_signum);
3303  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3304}
3305
3306typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3307
3308static os_sigaction_t os_sigaction = NULL;
3309
3310void os::Aix::check_signal_handler(int sig) {
3311  char buf[O_BUFLEN];
3312  address jvmHandler = NULL;
3313
3314  struct sigaction act;
3315  if (os_sigaction == NULL) {
3316    // only trust the default sigaction, in case it has been interposed
3317    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3318    if (os_sigaction == NULL) return;
3319  }
3320
3321  os_sigaction(sig, (struct sigaction*)NULL, &act);
3322
3323  address thisHandler = (act.sa_flags & SA_SIGINFO)
3324    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3325    : CAST_FROM_FN_PTR(address, act.sa_handler);
3326
3327  switch(sig) {
3328  case SIGSEGV:
3329  case SIGBUS:
3330  case SIGFPE:
3331  case SIGPIPE:
3332  case SIGILL:
3333  case SIGXFSZ:
3334    // Renamed 'signalHandler' to avoid collision with other shared libs.
3335    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3336    break;
3337
3338  case SHUTDOWN1_SIGNAL:
3339  case SHUTDOWN2_SIGNAL:
3340  case SHUTDOWN3_SIGNAL:
3341  case BREAK_SIGNAL:
3342    jvmHandler = (address)user_handler();
3343    break;
3344
3345  case INTERRUPT_SIGNAL:
3346    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3347    break;
3348
3349  default:
3350    if (sig == SR_signum) {
3351      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3352    } else {
3353      return;
3354    }
3355    break;
3356  }
3357
3358  if (thisHandler != jvmHandler) {
3359    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3360    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3361    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3362    // No need to check this sig any longer
3363    sigaddset(&check_signal_done, sig);
3364    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL is reassigned to SIG_IGN.
3365    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3366      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3367                    exception_name(sig, buf, O_BUFLEN));
3368    }
3369  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3370    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3371    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3372    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3373    // No need to check this sig any longer
3374    sigaddset(&check_signal_done, sig);
3375  }
3376
3377  // Dump all the signal handlers.
3378  if (sigismember(&check_signal_done, sig)) {
3379    print_signal_handlers(tty, buf, O_BUFLEN);
3380  }
3381}
3382
3383extern bool signal_name(int signo, char* buf, size_t len);
3384
3385const char* os::exception_name(int exception_code, char* buf, size_t size) {
3386  if (0 < exception_code && exception_code <= SIGRTMAX) {
3387    // signal
3388    if (!signal_name(exception_code, buf, size)) {
3389      jio_snprintf(buf, size, "SIG%d", exception_code);
3390    }
3391    return buf;
3392  } else {
3393    return NULL;
3394  }
3395}
3396
3397// To install functions for atexit system call
3398extern "C" {
3399  static void perfMemory_exit_helper() {
3400    perfMemory_exit();
3401  }
3402}
3403
3404// This is called _before_ most of the global arguments have been parsed.
3405void os::init(void) {
3406  // This is basic; we want to know if that ever changes.
3407  // (The shared memory boundary is supposed to be 256M aligned.)
3408  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3409
3410  // First off, we need to know whether we run on AIX or PASE, and
3411  // the OS level we run on.
3412  os::Aix::initialize_os_info();
3413
3414  // Scan environment (SPEC1170 behaviour, etc).
3415  os::Aix::scan_environment();
3416
3417  // Check which pages are supported by AIX.
3418  query_multipage_support();
3419
3420  // Act like we only have one page size by eliminating corner cases which
3421  // we did not support very well anyway.
3422  // We have two input conditions:
3423  // 1) Data segment page size. This is controlled by linker setting (datapsize) on the
3424  //    launcher, and/or by LDR_CNTRL environment variable. The latter overrules the linker
3425  //    setting.
3426  //    Data segment page size is important for us because it defines the thread stack page
3427  //    size, which is needed for guard page handling, stack banging etc.
3428  // 2) The ability to allocate 64k pages dynamically. If this is a given, java heap can
3429  //    and should be allocated with 64k pages.
3430  //
3431  // So, we do the following:
3432  // LDR_CNTRL    can_use_64K_pages_dynamically       what we do                      remarks
3433  // 4K           no                                  4K                              old systems (aix 5.2, as/400 v5r4) or new systems with AME activated
3434  // 4k           yes                                 64k (treat 4k stacks as 64k)    different loader than java and standard settings
3435  // 64k          no              --- AIX 5.2 ? ---
3436  // 64k          yes                                 64k                             new systems and standard java loader (we set datapsize=64k when linking)
3437
3438  // We explicitly leave no option to change page size, because only upgrading would work,
3439  // not downgrading (if the stack page size is 64k you cannot pretend it's 4k).
3440
3441  if (g_multipage_support.datapsize == SIZE_4K) {
3442    // datapsize = 4K. Data segment, thread stacks are 4K paged.
3443    if (g_multipage_support.can_use_64K_pages) {
3444      // .. but we are able to use 64K pages dynamically.
3445      // This would be typical for java launchers which are not linked
3446      // with datapsize=64K (like, any other launcher but our own).
3447      //
3448      // In this case it would be smart to allocate the java heap with 64K
3449      // to get the performance benefit, and to fake 64k pages for the
3450      // data segment (when dealing with thread stacks).
3451      //
3452      // However, leave a possibility to downgrade to 4K, using
3453      // -XX:-Use64KPages.
3454      if (Use64KPages) {
3455        trcVerbose("64K page mode (faked for data segment)");
3456        Aix::_page_size = SIZE_64K;
3457      } else {
3458        trcVerbose("4K page mode (Use64KPages=off)");
3459        Aix::_page_size = SIZE_4K;
3460      }
3461    } else {
3462      // .. and not able to allocate 64k pages dynamically. Here, just
3463      // fall back to 4K paged mode and use mmap for everything.
3464      trcVerbose("4K page mode");
3465      Aix::_page_size = SIZE_4K;
3466      FLAG_SET_ERGO(bool, Use64KPages, false);
3467    }
3468  } else {
3469    // datapsize = 64k. Data segment, thread stacks are 64k paged.
3470    //   This normally means that we can allocate 64k pages dynamically.
3471    //   (There is one special case where this may be false: EXTSHM=on.
3472  //    But we decided not to support that mode.)
3473    assert0(g_multipage_support.can_use_64K_pages);
3474    Aix::_page_size = SIZE_64K;
3475    trcVerbose("64K page mode");
3476    FLAG_SET_ERGO(bool, Use64KPages, true);
3477  }
3478
3479  // Hard-wire the stack page size to the base page size; if that works out,
3480  // we can remove the separate stack page size altogether.
3481  Aix::_stack_page_size = Aix::_page_size;
3482
3483  // For now UseLargePages is just ignored.
3484  FLAG_SET_ERGO(bool, UseLargePages, false);
3485  _page_sizes[0] = 0;
3486  _large_page_size = -1;
3487
3488  // debug trace
3489  trcVerbose("os::vm_page_size %s\n", describe_pagesize(os::vm_page_size()));
3490
3491  // Next, we need to initialize libo4 and libperfstat libraries.
3492  if (os::Aix::on_pase()) {
3493    os::Aix::initialize_libo4();
3494  } else {
3495    os::Aix::initialize_libperfstat();
3496  }
3497
3498  // Reset the perfstat information provided by ODM.
3499  if (os::Aix::on_aix()) {
3500    libperfstat::perfstat_reset();
3501  }
3502
3503  // Now initialize basic system properties. Note that for some of the values we
3504  // need libperfstat etc.
3505  os::Aix::initialize_system_info();
3506
3507  _initial_pid = getpid();
3508
3509  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3510
3511  init_random(1234567);
3512
3513  ThreadCritical::initialize();
3514
3515  // _main_thread points to the aboriginal thread.
3516  Aix::_main_thread = pthread_self();
3517
3518  initial_time_count = os::elapsed_counter();
3519
3520  // If the pagesize of the VM is greater than 8K determine the appropriate
3521  // number of initial guard pages. The user can change this with the
3522  // command line arguments, if needed.
3523  if (vm_page_size() > (int)Aix::vm_default_page_size()) {
3524    StackYellowPages = 1;
3525    StackRedPages = 1;
3526    StackShadowPages = round_to((StackShadowPages*Aix::vm_default_page_size()), vm_page_size()) / vm_page_size();
3527  }
3528}
3529
3530// This is called _after_ the global arguments have been parsed.
3531jint os::init_2(void) {
3532
3533  trcVerbose("processor count: %d", os::_processor_count);
3534  trcVerbose("physical memory: %lu", Aix::_physical_memory);
3535
3536  // Initially build up the loaded dll map.
3537  LoadedLibraries::reload();
3538
3539  const int page_size = Aix::page_size();
3540  const int map_size = page_size;
3541
3542  address map_address = (address) MAP_FAILED;
3543  const int prot  = PROT_READ;
3544  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3545
3546  // Use optimized addresses for the polling page,
3547  // e.g. map it to a special 32-bit address.
3548  if (OptimizePollingPageLocation) {
3549    // architecture-specific list of address wishes:
3550    address address_wishes[] = {
3551      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3552      // PPC64: all address wishes are non-negative 32 bit values where
3553      // the lower 16 bits are all zero. We can load these addresses
3554      // with a single ppc_lis instruction.
3555      (address) 0x30000000, (address) 0x31000000,
3556      (address) 0x32000000, (address) 0x33000000,
3557      (address) 0x40000000, (address) 0x41000000,
3558      (address) 0x42000000, (address) 0x43000000,
3559      (address) 0x50000000, (address) 0x51000000,
3560      (address) 0x52000000, (address) 0x53000000,
3561      (address) 0x60000000, (address) 0x61000000,
3562      (address) 0x62000000, (address) 0x63000000
3563    };
3564    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3565
3566    // iterate over the list of address wishes:
3567    for (int i=0; i<address_wishes_length; i++) {
3568      // Try to map with current address wish.
3569      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3570      // fail if the address is already mapped.
3571      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3572                                     map_size, prot,
3573                                     flags | MAP_FIXED,
3574                                     -1, 0);
3575      if (Verbose) {
3576        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3577                address_wishes[i], map_address + (ssize_t)page_size);
3578      }
3579
3580      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3581        // Map succeeded and map_address is at wished address, exit loop.
3582        break;
3583      }
3584
3585      if (map_address != (address) MAP_FAILED) {
3586        // Map succeeded, but polling_page is not at wished address, unmap and continue.
3587        ::munmap(map_address, map_size);
3588        map_address = (address) MAP_FAILED;
3589      }
3590      // Map failed, continue loop.
3591    }
3592  } // end OptimizePollingPageLocation
3593
3594  if (map_address == (address) MAP_FAILED) {
3595    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3596  }
3597  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3598  os::set_polling_page(map_address);
3599
3600  if (!UseMembar) {
3601    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3602    guarantee(mem_serialize_page != (address) MAP_FAILED, "mmap failed for memory serialize page");
3603    os::set_memory_serialize_page(mem_serialize_page);
3604
3605#ifndef PRODUCT
3606    if (Verbose && PrintMiscellaneous) {
3607      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3608    }
3609#endif
3610  }
3611
3612  // initialize suspend/resume support - must do this before signal_sets_init()
3613  if (SR_initialize() != 0) {
3614    perror("SR_initialize failed");
3615    return JNI_ERR;
3616  }
3617
3618  Aix::signal_sets_init();
3619  Aix::install_signal_handlers();
3620
3621  // Check minimum allowable stack size for thread creation and to initialize
3622  // the java system classes, including StackOverflowError - depends on page
3623  // size. Add a page for compiler2 recursion in main thread.
3624  // Add in 2*BytesPerWord times page size to account for VM stack during
3625  // class initialization depending on 32 or 64 bit VM.
3626  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3627            (size_t)(StackYellowPages+StackRedPages+StackShadowPages) * Aix::page_size() +
3628                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::vm_default_page_size());
3629
3630  os::Aix::min_stack_allowed = align_size_up(os::Aix::min_stack_allowed, os::Aix::page_size());
3631
3632  size_t threadStackSizeInBytes = ThreadStackSize * K;
3633  if (threadStackSizeInBytes != 0 &&
3634      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3635    tty->print_cr("\nThe stack size specified is too small. "
3636                  "Specify at least %dk",
3637                  os::Aix::min_stack_allowed / K);
3638    return JNI_ERR;
3639  }
3640
3641  // Make the stack size a multiple of the page size so that
3642  // the yellow/red zones can be guarded.
3643  // Note that this can be 0, if no default stacksize was set.
3644  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3645
3646  Aix::libpthread_init();
3647
3648  if (MaxFDLimit) {
3649    // Set the number of file descriptors to the maximum. Print an error
3650    // if getrlimit/setrlimit fails, but continue regardless.
3651    struct rlimit nbr_files;
3652    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3653    if (status != 0) {
3654      if (PrintMiscellaneous && (Verbose || WizardMode))
3655        perror("os::init_2 getrlimit failed");
3656    } else {
3657      nbr_files.rlim_cur = nbr_files.rlim_max;
3658      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3659      if (status != 0) {
3660        if (PrintMiscellaneous && (Verbose || WizardMode))
3661          perror("os::init_2 setrlimit failed");
3662      }
3663    }
3664  }
3665
3666  if (PerfAllowAtExitRegistration) {
3667    // Only register atexit functions if PerfAllowAtExitRegistration is set.
3668    // Atexit functions can be delayed until process exit time, which
3669    // can be problematic for embedded VM situations. Embedded VMs should
3670    // call DestroyJavaVM() to assure that VM resources are released.
3671
3672    // Note: perfMemory_exit_helper atexit function may be removed in
3673    // the future if the appropriate cleanup code can be added to the
3674    // VM_Exit VMOperation's doit method.
3675    if (atexit(perfMemory_exit_helper) != 0) {
3676      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3677    }
3678  }
3679
3680  return JNI_OK;
3681}
3682
3683// Mark the polling page as unreadable
3684void os::make_polling_page_unreadable(void) {
3685  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3686    fatal("Could not disable polling page");
3687  }
3688};
3689
3690// Mark the polling page as readable
3691void os::make_polling_page_readable(void) {
3692  // Changed according to os_linux.cpp.
3693  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3694    fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3695  }
3696};
3697
3698int os::active_processor_count() {
3699  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3700  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3701  return online_cpus;
3702}
3703
3704void os::set_native_thread_name(const char *name) {
3705  // Not yet implemented.
3706  return;
3707}
3708
3709bool os::distribute_processes(uint length, uint* distribution) {
3710  // Not yet implemented.
3711  return false;
3712}
3713
3714bool os::bind_to_processor(uint processor_id) {
3715  // Not yet implemented.
3716  return false;
3717}
3718
3719void os::SuspendedThreadTask::internal_do_task() {
3720  if (do_suspend(_thread->osthread())) {
3721    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3722    do_task(context);
3723    do_resume(_thread->osthread());
3724  }
3725}
3726
3727class PcFetcher : public os::SuspendedThreadTask {
3728public:
3729  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3730  ExtendedPC result();
3731protected:
3732  void do_task(const os::SuspendedThreadTaskContext& context);
3733private:
3734  ExtendedPC _epc;
3735};
3736
3737ExtendedPC PcFetcher::result() {
3738  guarantee(is_done(), "task is not done yet.");
3739  return _epc;
3740}
3741
3742void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3743  Thread* thread = context.thread();
3744  OSThread* osthread = thread->osthread();
3745  if (osthread->ucontext() != NULL) {
3746    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3747  } else {
3748    // NULL context is unexpected, double-check this is the VMThread.
3749    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3750  }
3751}
3752
3753// Suspends the target using the signal mechanism and then grabs the PC before
3754// resuming the target. Used by the flat-profiler only.
3755ExtendedPC os::get_thread_pc(Thread* thread) {
3756  // Make sure that it is called by the watcher for the VMThread.
3757  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3758  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3759
3760  PcFetcher fetcher(thread);
3761  fetcher.run();
3762  return fetcher.result();
3763}
3764
3765// Not needed on AIX.
3766// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
3767// }
3768
3769////////////////////////////////////////////////////////////////////////////////
3770// debug support
3771
3772static address same_page(address x, address y) {
3773  intptr_t page_bits = -os::vm_page_size();
3774  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3775    return x;
3776  else if (x > y)
3777    return (address)(intptr_t(y) | ~page_bits) + 1;
3778  else
3779    return (address)(intptr_t(y) & page_bits);
3780}
3781
3782bool os::find(address addr, outputStream* st) {
3783
3784  st->print(PTR_FORMAT ": ", addr);
3785
3786  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3787  if (lib) {
3788    lib->print(st);
3789    return true;
3790  } else {
3791    lib = LoadedLibraries::find_for_data_address(addr);
3792    if (lib) {
3793      lib->print(st);
3794      return true;
3795    } else {
3796      st->print_cr("(outside any module)");
3797    }
3798  }
3799
3800  return false;
3801}
3802
3803////////////////////////////////////////////////////////////////////////////////
3804// misc
3805
3806// This does not do anything on Aix. This is basically a hook for being
3807// able to use structured exception handling (thread-local exception filters)
3808// on, e.g., Win32.
3809void
3810os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3811                         JavaCallArguments* args, Thread* thread) {
3812  f(value, method, args, thread);
3813}
3814
3815void os::print_statistics() {
3816}
3817
3818int os::message_box(const char* title, const char* message) {
3819  int i;
3820  fdStream err(defaultStream::error_fd());
3821  for (i = 0; i < 78; i++) err.print_raw("=");
3822  err.cr();
3823  err.print_raw_cr(title);
3824  for (i = 0; i < 78; i++) err.print_raw("-");
3825  err.cr();
3826  err.print_raw_cr(message);
3827  for (i = 0; i < 78; i++) err.print_raw("=");
3828  err.cr();
3829
3830  char buf[16];
3831  // Prevent process from exiting upon "read error" without consuming all CPU
3832  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3833
3834  return buf[0] == 'y' || buf[0] == 'Y';
3835}
3836
3837int os::stat(const char *path, struct stat *sbuf) {
3838  char pathbuf[MAX_PATH];
3839  if (strlen(path) > MAX_PATH - 1) {
3840    errno = ENAMETOOLONG;
3841    return -1;
3842  }
3843  os::native_path(strcpy(pathbuf, path));
3844  return ::stat(pathbuf, sbuf);
3845}
3846
3847bool os::check_heap(bool force) {
3848  return true;
3849}
3850
3851// Is a (classpath) directory empty?
3852bool os::dir_is_empty(const char* path) {
3853  DIR *dir = NULL;
3854  struct dirent *ptr;
3855
3856  dir = opendir(path);
3857  if (dir == NULL) return true;
3858
3859  /* Scan the directory */
3860  bool result = true;
3861  char buf[sizeof(struct dirent) + MAX_PATH];
3862  while (result && (ptr = ::readdir(dir)) != NULL) {
3863    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
3864      result = false;
3865    }
3866  }
3867  closedir(dir);
3868  return result;
3869}
3870
3871// This code originates from JDK's sysOpen and open64_w
3872// from src/solaris/hpi/src/system_md.c
3873
3874int os::open(const char *path, int oflag, int mode) {
3875
3876  if (strlen(path) > MAX_PATH - 1) {
3877    errno = ENAMETOOLONG;
3878    return -1;
3879  }
3880  int fd;
3881
3882  fd = ::open64(path, oflag, mode);
3883  if (fd == -1) return -1;
3884
3885  // If the open succeeded, the file might still be a directory.
3886  {
3887    struct stat64 buf64;
3888    int ret = ::fstat64(fd, &buf64);
3889    int st_mode = buf64.st_mode;
3890
3891    if (ret != -1) {
3892      if ((st_mode & S_IFMT) == S_IFDIR) {
3893        errno = EISDIR;
3894        ::close(fd);
3895        return -1;
3896      }
3897    } else {
3898      ::close(fd);
3899      return -1;
3900    }
3901  }
3902
3903  // All file descriptors that are opened in the JVM and not
3904  // specifically destined for a subprocess should have the
3905  // close-on-exec flag set. If we don't set it, then careless 3rd
3906  // party native code might fork and exec without closing all
3907  // appropriate file descriptors (e.g. as we do in closeDescriptors in
3908  // UNIXProcess.c), and this in turn might:
3909  //
3910  // - cause end-of-file to fail to be detected on some file
3911  //   descriptors, resulting in mysterious hangs, or
3912  //
3913  // - might cause an fopen in the subprocess to fail on a system
3914  //   suffering from bug 1085341.
3915  //
3916  // (Yes, the default setting of the close-on-exec flag is a Unix
3917  // design flaw.)
3918  //
3919  // See:
3920  // 1085341: 32-bit stdio routines should support file descriptors >255
3921  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
3922  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
3923#ifdef FD_CLOEXEC
3924  {
3925    int flags = ::fcntl(fd, F_GETFD);
3926    if (flags != -1)
3927      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
3928  }
3929#endif
3930
3931  return fd;
3932}
3933
3934// create binary file, rewriting existing file if required
3935int os::create_binary_file(const char* path, bool rewrite_existing) {
3936  int oflags = O_WRONLY | O_CREAT;
3937  if (!rewrite_existing) {
3938    oflags |= O_EXCL;
3939  }
3940  return ::open64(path, oflags, S_IREAD | S_IWRITE);
3941}
3942
3943// return current position of file pointer
3944jlong os::current_file_offset(int fd) {
3945  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
3946}
3947
3948// move file pointer to the specified offset
3949jlong os::seek_to_file_offset(int fd, jlong offset) {
3950  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
3951}
3952
3953// This code originates from JDK's sysAvailable
3954// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
3955
3956int os::available(int fd, jlong *bytes) {
3957  jlong cur, end;
3958  int mode;
3959  struct stat64 buf64;
3960
3961  if (::fstat64(fd, &buf64) >= 0) {
3962    mode = buf64.st_mode;
3963    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
3964      // XXX: is the following call interruptible? If so, this might
3965      // need to go through the INTERRUPT_IO() wrapper as for other
3966      // blocking, interruptible calls in this file.
3967      int n;
3968      if (::ioctl(fd, FIONREAD, &n) >= 0) {
3969        *bytes = n;
3970        return 1;
3971      }
3972    }
3973  }
3974  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
3975    return 0;
3976  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
3977    return 0;
3978  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
3979    return 0;
3980  }
3981  *bytes = end - cur;
3982  return 1;
3983}
3984
3985// Map a block of memory.
3986char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
3987                        char *addr, size_t bytes, bool read_only,
3988                        bool allow_exec) {
3989  int prot;
3990  int flags = MAP_PRIVATE;
3991
3992  if (read_only) {
3993    prot = PROT_READ;
3994    flags = MAP_SHARED;
3995  } else {
3996    prot = PROT_READ | PROT_WRITE;
3997    flags = MAP_PRIVATE;
3998  }
3999
4000  if (allow_exec) {
4001    prot |= PROT_EXEC;
4002  }
4003
4004  if (addr != NULL) {
4005    flags |= MAP_FIXED;
4006  }
4007
4008  // Allow anonymous mappings if 'fd' is -1.
4009  if (fd == -1) {
4010    flags |= MAP_ANONYMOUS;
4011  }
4012
4013  char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
4014                                     fd, file_offset);
4015  if (mapped_address == MAP_FAILED) {
4016    return NULL;
4017  }
4018  return mapped_address;
4019}
4020
4021// Remap a block of memory.
4022char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4023                          char *addr, size_t bytes, bool read_only,
4024                          bool allow_exec) {
4025  // same as map_memory() on this OS
4026  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4027                        allow_exec);
4028}
4029
4030// Unmap a block of memory.
4031bool os::pd_unmap_memory(char* addr, size_t bytes) {
4032  return munmap(addr, bytes) == 0;
4033}
4034
4035// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4036// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4037// of a thread.
4038//
4039// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4040// the fast estimate available on the platform.
4041
4042jlong os::current_thread_cpu_time() {
4043  // return user + sys since the cost is the same
4044  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4045  assert(n >= 0, "negative CPU time");
4046  return n;
4047}
4048
4049jlong os::thread_cpu_time(Thread* thread) {
4050  // consistent with what current_thread_cpu_time() returns
4051  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4052  assert(n >= 0, "negative CPU time");
4053  return n;
4054}
4055
4056jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4057  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4058  assert(n >= 0, "negative CPU time");
4059  return n;
4060}
4061
4062static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4063  bool error = false;
4064
4065  jlong sys_time = 0;
4066  jlong user_time = 0;
4067
4068  // Reimplemented using getthrds64().
4069  //
4070  // Works like this:
4071  // For the thread in question, get the kernel thread id. Then get the
4072  // kernel thread statistics using that id.
4073  //
4074  // This only works of course when no pthread scheduling is used,
4075  // i.e. there is a 1:1 relationship to kernel threads.
4076  // On AIX, see AIXTHREAD_SCOPE variable.
4077
4078  pthread_t pthtid = thread->osthread()->pthread_id();
4079
4080  // retrieve kernel thread id for the pthread:
4081  tid64_t tid = 0;
4082  struct __pthrdsinfo pinfo;
4083  // I just love those otherworldly IBM APIs which force me to hand down
4084  // dummy buffers for stuff I don't care about...
4085  char dummy[1];
4086  int dummy_size = sizeof(dummy);
4087  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4088                          dummy, &dummy_size) == 0) {
4089    tid = pinfo.__pi_tid;
4090  } else {
4091    tty->print_cr("pthread_getthrds_np failed.");
4092    error = true;
4093  }
4094
4095  // retrieve kernel timing info for that kernel thread
4096  if (!error) {
4097    struct thrdentry64 thrdentry;
4098    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4099      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4100      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4101    } else {
4102      tty->print_cr("getthrds64 failed.");
4103      error = true;
4104    }
4105  }
4106
4107  if (p_sys_time) {
4108    *p_sys_time = sys_time;
4109  }
4110
4111  if (p_user_time) {
4112    *p_user_time = user_time;
4113  }
4114
4115  if (error) {
4116    return false;
4117  }
4118
4119  return true;
4120}
4121
4122jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4123  jlong sys_time;
4124  jlong user_time;
4125
4126  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4127    return -1;
4128  }
4129
4130  return user_sys_cpu_time ? sys_time + user_time : user_time;
4131}
4132
4133void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4134  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4135  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4136  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4137  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4138}
4139
4140void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4141  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4142  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4143  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4144  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4145}
4146
4147bool os::is_thread_cpu_time_supported() {
4148  return true;
4149}
4150
4151// System loadavg support. Returns -1 if load average cannot be obtained.
4152// For now just return the system wide load average (no processor sets).
4153int os::loadavg(double values[], int nelem) {
4154
4155  // Implemented using libperfstat on AIX.
4156
4157  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4158  guarantee(values, "argument error");
4159
4160  if (os::Aix::on_pase()) {
4161    Unimplemented();
4162    return -1;
4163  } else {
4164    // AIX: use libperfstat
4165    //
4166    // See also:
4167    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4168    // /usr/include/libperfstat.h:
4169
4170    // Use the AIX-version-independent get_cpuinfo helper.
4171    os::Aix::cpuinfo_t ci;
4172    if (os::Aix::get_cpuinfo(&ci)) {
4173      for (int i = 0; i < nelem; i++) {
4174        values[i] = ci.loadavg[i];
4175      }
4176    } else {
4177      return -1;
4178    }
4179    return nelem;
4180  }
4181}
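
// Usage sketch for the wrapper above (illustration only, not part of the
// build; the helper name is hypothetical): retrieve the 1-, 5- and
// 15-minute load averages.
#if 0
static void print_loadavg_sketch() {
  double avg[3];
  if (os::loadavg(avg, 3) == 3) {
    tty->print_cr("load averages: %.2f %.2f %.2f", avg[0], avg[1], avg[2]);
  }
}
#endif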
4182
4183void os::pause() {
4184  char filename[MAX_PATH];
4185  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4186    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4187  } else {
4188    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4189  }
4190
4191  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4192  if (fd != -1) {
4193    struct stat buf;
4194    ::close(fd);
4195    while (::stat(filename, &buf) == 0) {
4196      (void)::poll(NULL, 0, 100);
4197    }
4198  } else {
4199    jio_fprintf(stderr,
4200      "Could not open pause file '%s', continuing immediately.\n", filename);
4201  }
4202}
4203
4204bool os::Aix::is_primordial_thread() {
4205  if (pthread_self() == (pthread_t)1) {
4206    return true;
4207  } else {
4208    return false;
4209  }
4210}
4211
4212// OS recognition (PASE/AIX, OS level). Call this before calling any
4213// of the static functions Aix::on_pase() and Aix::os_version().
4214void os::Aix::initialize_os_info() {
4215
4216  assert(_on_pase == -1 && _os_version == -1, "already called.");
4217
4218  struct utsname uts;
4219  memset(&uts, 0, sizeof(uts));
4220  strcpy(uts.sysname, "?");
4221  if (::uname(&uts) == -1) {
4222    trc("uname failed (%d)", errno);
4223    guarantee(0, "Could not determine whether we run on AIX or PASE");
4224  } else {
4225    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4226               "node \"%s\" machine \"%s\"\n",
4227               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4228    const int major = atoi(uts.version);
4229    assert(major > 0, "invalid OS version");
4230    const int minor = atoi(uts.release);
4231    assert(minor > 0, "invalid OS release");
4232    _os_version = (major << 8) | minor;
4233    if (strcmp(uts.sysname, "OS400") == 0) {
4234      Unimplemented();
4235    } else if (strcmp(uts.sysname, "AIX") == 0) {
4236      // We run on AIX. We do not support versions older than AIX 5.3.
4237      _on_pase = 0;
4238      if (_os_version < 0x0503) {
4239        trc("AIX release older than AIX 5.3 not supported.");
4240        assert(false, "AIX release too old.");
4241      } else {
4242        trcVerbose("We run on AIX %d.%d\n", major, minor);
4243      }
4244    } else {
4245      assert(false, "unknown OS");
4246    }
4247  }
4248
4249  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4250} // end: os::Aix::initialize_os_info()
4251
4252// Scan environment for important settings which might affect the VM.
4253// Trace out settings. Warn about invalid settings and/or correct them.
4254//
4255// Must run after os::Aix::initialize_os_info().
4256void os::Aix::scan_environment() {
4257
4258  char* p;
4259  int rc;
4260
4261  // Warn explicitly if EXTSHM=ON is used. That switch changes how
4262  // System V shared memory behaves. One effect is that the page size of
4263  // shared memory cannot be changed dynamically, effectively preventing
4264  // large pages from working.
4265  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4266  // recommendation is (in OSS notes) to switch it off.
4267  p = ::getenv("EXTSHM");
4268  if (Verbose) {
4269    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4270  }
4271  if (p && strcasecmp(p, "ON") == 0) {
4272    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4273    _extshm = 1;
4274  } else {
4275    _extshm = 0;
4276  }
4277
4278  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4279  // Not tested, not supported.
4280  //
4281  // Note that it might be worth the trouble to test and to require it, if only to
4282  // get useful return codes for mprotect.
4283  //
4284  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4285  // exec() ? before loading the libjvm ? ....)
4286  p = ::getenv("XPG_SUS_ENV");
4287  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
4288  if (p && strcmp(p, "ON") == 0) {
4289    _xpg_sus_mode = 1;
4290    trc("Unsupported setting: XPG_SUS_ENV=ON");
4291    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4292    // clobber address ranges. If we ever want to support that, we have to do some
4293    // testing first.
4294    guarantee(false, "XPG_SUS_ENV=ON not supported");
4295  } else {
4296    _xpg_sus_mode = 0;
4297  }
4298
4299  // Switch off AIX internal (pthread) guard pages. This has
4300  // immediate effect for any pthread_create calls which follow.
4301  p = ::getenv("AIXTHREAD_GUARDPAGES");
4302  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
4303  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4304  guarantee(rc == 0, "");
4305
4306} // end: os::Aix::scan_environment()
4307
4308// PASE: initialize the libo4 library (AS400 PASE porting library).
4309void os::Aix::initialize_libo4() {
4310  Unimplemented();
4311}
4312
4313// AIX: initialize the libperfstat library (we load this dynamically
4314// because it is only available on AIX).
4315void os::Aix::initialize_libperfstat() {
4316
4317  assert(os::Aix::on_aix(), "AIX only");
4318
4319  if (!libperfstat::init()) {
4320    trc("libperfstat initialization failed.");
4321    assert(false, "libperfstat initialization failed");
4322  } else {
4323    if (Verbose) {
4324      fprintf(stderr, "libperfstat initialized.\n");
4325    }
4326  }
4327} // end: os::Aix::initialize_libperfstat
4328
4329/////////////////////////////////////////////////////////////////////////////
4330// thread stack
4331
4332// Function to query the current stack size using pthread_getthrds_np.
4333static bool query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4334  // This only works when invoked on a pthread. As we agreed not to use
4335  // primordial threads anyway, I assert here.
4336  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4337
4338  // Information about this api can be found (a) in the pthread.h header and
4339  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4340  //
4341  // The use of this API to find out the current stack is kind of undefined.
4342  // But after a lot of tries and asking IBM about it, I concluded that it is safe
4343  // enough for cases where I let the pthread library create its stacks. For cases
4344  // where I create my own stack and pass it to pthread_create, it seems not to
4345  // work (the returned stack size in that case is 0).
4346
4347  pthread_t tid = pthread_self();
4348  struct __pthrdsinfo pinfo;
4349  char dummy[1]; // We only need this to satisfy the api and to not get E.
4350  int dummy_size = sizeof(dummy);
4351
4352  memset(&pinfo, 0, sizeof(pinfo));
4353
4354  const int rc = pthread_getthrds_np(&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4355                                     sizeof(pinfo), dummy, &dummy_size);
4356
4357  if (rc != 0) {
4358    assert0(false);
4359    trcVerbose("pthread_getthrds_np failed (%d)", rc);
4360    return false;
4361  }
4362  guarantee0(pinfo.__pi_stackend);
4363
4364  // The following can happen when invoking pthread_getthrds_np on a pthread running
4365  // on a user provided stack (when handing down a stack to pthread create, see
4366  // pthread_attr_setstackaddr).
4367  // Not sure what to do here - I feel inclined to forbid this use case completely.
4368  guarantee0(pinfo.__pi_stacksize);
4369
4370  // Note: the pthread stack on AIX seems to look like this:
4371  //
4372  // ---------------------   real base ? at page border ?
4373  //
4374  //     pthread internal data, like ~2K, see also
4375  //     http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/thread_supp_tun_params.htm
4376  //
4377  // ---------------------   __pi_stackend - not page aligned, (xxxxF890)
4378  //
4379  //     stack
4380  //      ....
4381  //
4382  //     stack
4383  //
4384  // ---------------------   __pi_stackend  - __pi_stacksize
4385  //
4386  //     padding due to AIX guard pages (?) see AIXTHREAD_GUARDPAGES
4387  // ---------------------   __pi_stackaddr  (page aligned if AIXTHREAD_GUARDPAGES > 0)
4388  //
4389  //   AIX guard pages (?)
4390  //
4391
4392  // So, the safe thing to do is to use the area from __pi_stackend to __pi_stackaddr;
4393  // __pi_stackend however is almost never page aligned.
4394  //
4395
4396  if (p_stack_base) {
4397    (*p_stack_base) = (address) (pinfo.__pi_stackend);
4398  }
4399
4400  if (p_stack_size) {
4401    (*p_stack_size) = pinfo.__pi_stackend - pinfo.__pi_stackaddr;
4402  }
4403
4404  return true;
4405}
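
// Illustrative sketch of how the figures above combine (hypothetical
// helper, not used by this file): derive a page-aligned usable stack range
// from the base/size reported by query_stack_dimensions(), given that
// __pi_stackend is almost never page aligned.
#if 0
static void usable_stack_range(address base, size_t size,
                               address* lo, address* hi) {
  const intptr_t mask = ~(intptr_t)(os::vm_page_size() - 1);
  *hi = (address)((intptr_t)base & mask);  // align the (unaligned) top down
  *lo = base - size;                       // lowest usable address (__pi_stackaddr)
}
#endif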
4406
4407// Get the current stack base from the OS (actually, the pthread library).
4408address os::current_stack_base() {
4409  address p;
4410  query_stack_dimensions(&p, 0);
4411  return p;
4412}
4413
4414// Get the current stack size from the OS (actually, the pthread library).
4415size_t os::current_stack_size() {
4416  size_t s;
4417  query_stack_dimensions(0, &s);
4418  return s;
4419}
4420
4421// Refer to the comments in os_solaris.cpp park-unpark.
4422//
4423// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4424// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4425// For specifics regarding the bug see GLIBC BUGID 261237 :
4426//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4427// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4428// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4429// is used. (The simple C test-case provided in the GLIBC bug report manifests the
4430// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4431// and monitorenter when we're using 1-0 locking. All those operations may result in
4432// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4433// of libpthread avoids the problem, but isn't practical.
4434//
4435// Possible remedies:
4436//
4437// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4438//      This is palliative and probabilistic, however. If the thread is preempted
4439//      between the call to compute_abstime() and pthread_cond_timedwait(), more
4440//      than the minimum period may have passed, and the abstime may be stale (in the
4441//      past) resulting in a hang. Using this technique reduces the odds of a hang
4442//      but the JVM is still vulnerable, particularly on heavily loaded systems.
4443//
4444// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4445//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4446//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4447//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4448//      thread.
4449//
4450// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4451//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4452//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4453//      This also works well. In fact it avoids kernel-level scalability impediments
4454//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4455//      timers in a graceful fashion.
4456//
4457// 4.   When the abstime value is in the past it appears that control returns
4458//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4459//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4460//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4461//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4462//      It may be possible to avoid reinitialization by checking the return
4463//      value from pthread_cond_timedwait(). In addition to reinitializing the
4464//      condvar we must establish the invariant that cond_signal() is only called
4465//      within critical sections protected by the adjunct mutex. This prevents
4466//      cond_signal() from "seeing" a condvar that's in the midst of being
4467//      reinitialized or that is corrupt. Sadly, this invariant obviates the
4468//      desirable signal-after-unlock optimization that avoids futile context switching.
4469//
4470//      I'm also concerned that some versions of NPTL might allocate an auxiliary
4471//      structure when a condvar is used or initialized. cond_destroy() would
4472//      release the helper structure. Our reinitialize-after-timedwait fix
4473//      would put excessive stress on malloc/free and locks protecting the c-heap.
4474//
4475// We currently use (4). See the WorkAroundNTPLTimedWaitHang flag.
4476// It may be possible to refine (4) by checking the kernel and NPTL versions
4477// and only enabling the work-around for vulnerable environments.
4478
4479// utility to compute the abstime argument to timedwait:
4480// millis is the relative timeout time
4481// abstime will be the absolute timeout time
4482// TODO: replace compute_abstime() with unpackTime()
4483
4484static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4485  if (millis < 0) millis = 0;
4486  struct timeval now;
4487  int status = gettimeofday(&now, NULL);
4488  assert(status == 0, "gettimeofday");
4489  jlong seconds = millis / 1000;
4490  millis %= 1000;
4491  if (seconds > 50000000) { // see man cond_timedwait(3T)
4492    seconds = 50000000;
4493  }
4494  abstime->tv_sec = now.tv_sec  + seconds;
4495  long       usec = now.tv_usec + millis * 1000;
4496  if (usec >= 1000000) {
4497    abstime->tv_sec += 1;
4498    usec -= 1000000;
4499  }
4500  abstime->tv_nsec = usec * 1000;
4501  return abstime;
4502}
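
// Typical usage sketch for compute_abstime() (illustration only, not part
// of the build; the mutex, condvar and flag names are hypothetical):
// convert a relative timeout in milliseconds to the absolute deadline that
// pthread_cond_timedwait() expects, then wait under the mutex.
#if 0
static bool wait_with_timeout(pthread_mutex_t* m, pthread_cond_t* c,
                              volatile int* flag, jlong millis) {
  struct timespec abst;
  compute_abstime(&abst, millis);
  pthread_mutex_lock(m);
  int status = 0;
  while (*flag == 0 && status != ETIMEDOUT) {
    status = pthread_cond_timedwait(c, m, &abst);
  }
  const bool signaled = (*flag != 0);  // distinguish wakeup from timeout
  pthread_mutex_unlock(m);
  return signaled;
}
#endif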
4503
4504// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4505// Conceptually TryPark() should be equivalent to park(0).
4506
4507int os::PlatformEvent::TryPark() {
4508  for (;;) {
4509    const int v = _Event;
4510    guarantee ((v == 0) || (v == 1), "invariant");
4511    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4512  }
4513}
4514
4515void os::PlatformEvent::park() {       // AKA "down()"
4516  // Invariant: Only the thread associated with the Event/PlatformEvent
4517  // may call park().
4518  // TODO: assert that _Assoc != NULL or _Assoc == Self
4519  int v;
4520  for (;;) {
4521    v = _Event;
4522    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4523  }
4524  guarantee (v >= 0, "invariant");
4525  if (v == 0) {
4526    // Do this the hard way by blocking ...
4527    int status = pthread_mutex_lock(_mutex);
4528    assert_status(status == 0, status, "mutex_lock");
4529    guarantee (_nParked == 0, "invariant");
4530    ++ _nParked;
4531    while (_Event < 0) {
4532      status = pthread_cond_wait(_cond, _mutex);
4533      assert_status(status == 0, status, "cond_wait");
4534    }
4535    -- _nParked;
4536
4537    // In theory we could move the ST of 0 into _Event past the unlock(),
4538    // but then we'd need a MEMBAR after the ST.
4539    _Event = 0;
4540    status = pthread_mutex_unlock(_mutex);
4541    assert_status(status == 0, status, "mutex_unlock");
4542  }
4543  guarantee (_Event >= 0, "invariant");
4544}
4545
4546int os::PlatformEvent::park(jlong millis) {
4547  guarantee (_nParked == 0, "invariant");
4548
4549  int v;
4550  for (;;) {
4551    v = _Event;
4552    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4553  }
4554  guarantee (v >= 0, "invariant");
4555  if (v != 0) return OS_OK;
4556
4557  // We do this the hard way, by blocking the thread.
4558  // Consider enforcing a minimum timeout value.
4559  struct timespec abst;
4560  compute_abstime(&abst, millis);
4561
4562  int ret = OS_TIMEOUT;
4563  int status = pthread_mutex_lock(_mutex);
4564  assert_status(status == 0, status, "mutex_lock");
4565  guarantee (_nParked == 0, "invariant");
4566  ++_nParked;
4567
4568  // Object.wait(timo) will return because of
4569  // (a) notification
4570  // (b) timeout
4571  // (c) thread.interrupt
4572  //
4573  // Thread.interrupt and object.notify{All} both call Event::set.
4574  // That is, we treat thread.interrupt as a special case of notification.
4575  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4576  // We assume all ETIME returns are valid.
4577  //
4578  // TODO: properly differentiate simultaneous notify+interrupt.
4579  // In that case, we should propagate the notify to another waiter.
4580
4581  while (_Event < 0) {
4582    status = pthread_cond_timedwait(_cond, _mutex, &abst);
4583    assert_status(status == 0 || status == ETIMEDOUT,
4584                  status, "cond_timedwait");
4585    if (!FilterSpuriousWakeups) break;         // previous semantics
4586    if (status == ETIMEDOUT) break;
4587    // We consume and ignore EINTR and spurious wakeups.
4588  }
4589  --_nParked;
4590  if (_Event >= 0) {
4591     ret = OS_OK;
4592  }
4593  _Event = 0;
4594  status = pthread_mutex_unlock(_mutex);
4595  assert_status(status == 0, status, "mutex_unlock");
4596  assert (_nParked == 0, "invariant");
4597  return ret;
4598}
4599
4600void os::PlatformEvent::unpark() {
4601  int v, AnyWaiters;
4602  for (;;) {
4603    v = _Event;
4604    if (v > 0) {
4605      // The LD of _Event could have reordered or be satisfied
4606      // by a read-aside from this processor's write buffer.
4607      // To avoid problems execute a barrier and then
4608      // ratify the value.
4609      OrderAccess::fence();
4610      if (_Event == v) return;
4611      continue;
4612    }
4613    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4614  }
4615  if (v < 0) {
4616    // Wait for the thread associated with the event to vacate
4617    int status = pthread_mutex_lock(_mutex);
4618    assert_status(status == 0, status, "mutex_lock");
4619    AnyWaiters = _nParked;
4620
4621    if (AnyWaiters != 0) {
4622      // We intentionally signal *while still holding* the lock here;
4623      // see the note on pthread_cond_signal() below.
4624      status = pthread_cond_signal(_cond);
4625      assert_status(status == 0, status, "cond_signal");
4626    }
4627    // Mutex should be locked for pthread_cond_signal(_cond).
4628    status = pthread_mutex_unlock(_mutex);
4629    assert_status(status == 0, status, "mutex_unlock");
4630  }
4631
4632  // Note: signalling _after dropping the lock for "immortal" Events would
4633  // also be safe and would avoid a common class of futile wakeups. In rare
4634  // circumstances that can cause a thread to return prematurely from
4635  // cond_{timed}wait(), but the spurious wakeup is benign and the victim
4636  // will simply re-test the condition and re-park itself.
4637}
4638
4639
4640// JSR166
4641// -------------------------------------------------------
4642
4643//
4644  // The Solaris and Linux implementations of park/unpark are fairly
4645  // conservative for now, but can be improved. They currently use a
4646  // mutex/condvar pair, plus a count.
4647  // Park decrements the count if it is > 0, else does a condvar wait. Unpark
4648  // sets the count to 1 and signals the condvar. Only one thread ever waits
4649  // on the condvar. Contention seen when trying to park implies that someone
4650  // is unparking you, so don't wait. And spurious returns are fine, so there
4651  // is no need to track notifications. (A sketch of this protocol follows.)
4652//
4653
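// A minimal sketch of the protocol described above, using hypothetical
// helpers lock/unlock/wait/signal (illustrative only -- the real
// implementations are Parker::park() and Parker::unpark() below):
//
//   void park()   { lock(m); if (count == 0) wait(c, m); count = 0; unlock(m); }
//   void unpark() { lock(m); count = 1; signal(c); unlock(m); }
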
4654#define MAX_SECS 100000000
4655//
4656  // This code is common to Linux and Solaris and will be moved to a
4657  // common place in Dolphin.
4658  //
4659  // The passed-in time value is either a relative time in nanoseconds
4660  // or an absolute time in milliseconds. Either way it has to be unpacked
4661  // into suitable seconds and nanoseconds components and stored in the
4662  // given timespec structure.
4663  // Given that the time is a 64-bit value and the time_t used in the timespec
4664  // is only a signed 32-bit value (except on 64-bit Linux), we have to watch
4665  // for overflow if times far in the future are given. Further, on Solaris
4666  // versions prior to 10 there is a restriction (see cond_timedwait) that the
4667  // specified number of seconds, in abstime, must be less than
4668  // current_time + 100,000,000. As it will be 28 years before
4669  // "now + 100000000" overflows, we can ignore overflow and just impose a
4670  // hard limit on seconds using the value of "now + 100,000,000". This places
4671  // a limit on the timeout of about 3.17 years from "now".
4672//
4673
4674static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4675  assert(time > 0, "unpackTime");
4676
4677  struct timeval now;
4678  int status = gettimeofday(&now, NULL);
4679  assert(status == 0, "gettimeofday");
4680
4681  time_t max_secs = now.tv_sec + MAX_SECS;
4682
4683  if (isAbsolute) {
4684    jlong secs = time / 1000;
4685    if (secs > max_secs) {
4686      absTime->tv_sec = max_secs;
4687    }
4688    else {
4689      absTime->tv_sec = secs;
4690    }
4691    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4692  }
4693  else {
4694    jlong secs = time / NANOSECS_PER_SEC;
4695    if (secs >= MAX_SECS) {
4696      absTime->tv_sec = max_secs;
4697      absTime->tv_nsec = 0;
4698    }
4699    else {
4700      absTime->tv_sec = now.tv_sec + secs;
4701      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4702      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4703        absTime->tv_nsec -= NANOSECS_PER_SEC;
4704        ++absTime->tv_sec; // note: this must be <= max_secs
4705      }
4706    }
4707  }
4708  assert(absTime->tv_sec >= 0, "tv_sec < 0");
4709  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4710  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4711  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4712}
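
// Worked example for the relative case (hypothetical values): with
// now = { tv_sec = 1000, tv_usec = 900000 } and time = 2500000000 ns,
// secs = 2, so tv_sec becomes 1002 and tv_nsec = 500000000 + 900000*1000
// = 1400000000; that exceeds NANOSECS_PER_SEC, so the result is
// normalized to tv_sec = 1003, tv_nsec = 400000000.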
4713
4714void Parker::park(bool isAbsolute, jlong time) {
4715  // Optional fast-path check:
4716  // Return immediately if a permit is available.
4717  if (_counter > 0) {
4718    _counter = 0;
4719    OrderAccess::fence();
4720    return;
4721  }
4722
4723  Thread* thread = Thread::current();
4724  assert(thread->is_Java_thread(), "Must be JavaThread");
4725  JavaThread *jt = (JavaThread *)thread;
4726
4727  // Optional optimization -- avoid state transitions if there's an interrupt pending.
4728  // Check interrupt before trying to wait
4729  if (Thread::is_interrupted(thread, false)) {
4730    return;
4731  }
4732
4733  // Next, demultiplex/decode time arguments
4734  timespec absTime;
4735  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4736    return;
4737  }
4738  if (time > 0) {
4739    unpackTime(&absTime, isAbsolute, time);
4740  }
4741
4742  // Enter safepoint region
4743  // Beware of deadlocks such as 6317397.
4744  // The per-thread Parker:: mutex is a classic leaf-lock.
4745  // In particular a thread must never block on the Threads_lock while
4746  // holding the Parker:: mutex. If safepoints are pending, both the
4747  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4748  ThreadBlockInVM tbivm(jt);
4749
4750  // Don't wait if we cannot get the lock, since interference arises from
4751  // unblocking. Also, check for a pending interrupt before trying to wait.
4752  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4753    return;
4754  }
4755
4756  int status;
4757  if (_counter > 0) { // no wait needed
4758    _counter = 0;
4759    status = pthread_mutex_unlock(_mutex);
4760    assert (status == 0, "invariant");
4761    OrderAccess::fence();
4762    return;
4763  }
4764
4765#ifdef ASSERT
4766  // Don't catch signals while blocked; let the running threads have the signals.
4767  // (This allows a debugger to break into the running thread.)
4768  sigset_t oldsigs;
4769  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4770  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4771#endif
4772
4773  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4774  jt->set_suspend_equivalent();
4775  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4776
4777  if (time == 0) {
4778    status = pthread_cond_wait (_cond, _mutex);
4779  } else {
4780    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4781    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4782      pthread_cond_destroy (_cond);
4783      pthread_cond_init    (_cond, NULL);
4784    }
4785  }
4786  assert_status(status == 0 || status == EINTR ||
4787                status == ETIME || status == ETIMEDOUT,
4788                status, "cond_timedwait");
4789
4790#ifdef ASSERT
4791  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4792#endif
4793
4794  _counter = 0;
4795  status = pthread_mutex_unlock(_mutex);
4796  assert_status(status == 0, status, "invariant");
4797  // If externally suspended while waiting, re-suspend
4798  if (jt->handle_special_suspend_equivalent_condition()) {
4799    jt->java_suspend_self();
4800  }
4801
4802  OrderAccess::fence();
4803}
4804
4805void Parker::unpark() {
4806  int s, status;
4807  status = pthread_mutex_lock(_mutex);
4808  assert (status == 0, "invariant");
4809  s = _counter;
4810  _counter = 1;
4811  if (s < 1) {
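    // A waiter may be present. With WorkAroundNPTLTimedWaitHang we signal
    // while still holding the mutex; otherwise we unlock first, so the
    // awakened thread does not immediately block on the mutex we still hold.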
4812    if (WorkAroundNPTLTimedWaitHang) {
4813      status = pthread_cond_signal (_cond);
4814      assert (status == 0, "invariant");
4815      status = pthread_mutex_unlock(_mutex);
4816      assert (status == 0, "invariant");
4817    } else {
4818      status = pthread_mutex_unlock(_mutex);
4819      assert (status == 0, "invariant");
4820      status = pthread_cond_signal (_cond);
4821      assert (status == 0, "invariant");
4822    }
4823  } else {
4824    status = pthread_mutex_unlock(_mutex);
4825    assert (status == 0, "invariant");
4826  }
4827}
4828
4829extern char** environ;
4830
4831// Run the specified command in a separate process. Return its exit value,
4832// or -1 on failure (e.g. can't fork a new process).
4833  // Unlike system(), this function can be called from a signal handler. It
4834// doesn't block SIGINT et al.
4835int os::fork_and_exec(char* cmd) {
4836  char* argv[4] = { (char*) "sh", (char*) "-c", cmd, NULL };
4837
4838  pid_t pid = fork();
4839
4840  if (pid < 0) {
4841    // fork failed
4842    return -1;
4843
4844  } else if (pid == 0) {
4845    // child process
4846
4847    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
4848    execve("/usr/bin/sh", argv, environ);
4849
4850    // execve failed
4851    _exit(-1);
4852
4853  } else {
4854    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
4855    // care about the actual exit code, for now.
4856
4857    int status;
4858
4859    // Wait for the child process to exit. This returns immediately if
4860    // the child has already exited.
4861    while (waitpid(pid, &status, 0) < 0) {
4862      switch (errno) {
4863        case ECHILD: return 0;
4864        case EINTR: break;
4865        default: return -1;
4866      }
4867    }
4868
4869    if (WIFEXITED(status)) {
4870      // The child exited normally; get its exit code.
4871      return WEXITSTATUS(status);
4872    } else if (WIFSIGNALED(status)) {
4873      // The child exited because of a signal.
4874      // The best value to return is 0x80 + signal number,
4875      // because that is what all Unix shells do, and because
4876      // it allows callers to distinguish between process exit and
4877      // process death by signal.
4878      return 0x80 + WTERMSIG(status);
4879    } else {
4880      // Unknown exit code; pass it through.
4881      return status;
4882    }
4883  }
4884  return -1;
4885}
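
// Usage sketch (hypothetical, for illustration only):
//
//   int rc = os::fork_and_exec((char*) "ls /tmp");
//   if (rc == -1) {
//     // could not fork, or the shell could not be exec'd
//   } else if (rc >= 0x80) {
//     // by the convention above, the child was likely killed by
//     // signal (rc - 0x80)
//   } else {
//     // rc is the child's ordinary exit code
//   }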
4886
4887// is_headless_jre()
4888//
4889  // Test for the existence of xawt/libmawt.so or libawt_xawt.so
4890  // in order to report whether we are running in a headless JRE.
4891  //
4892  // Since JDK 8, xawt/libmawt.so has moved into the same directory
4893  // as libawt.so and been renamed libawt_xawt.so.
4894bool os::is_headless_jre() {
4895  struct stat statbuf;
4896  char buf[MAXPATHLEN];
4897  char libmawtpath[MAXPATHLEN];
4898  const char *xawtstr = "/xawt/libmawt.so";
4899  const char *new_xawtstr = "/libawt_xawt.so";
4900
4901  char *p;
4902
4903  // Get path to libjvm.so
4904  os::jvm_path(buf, sizeof(buf));
4905
4906  // Get rid of libjvm.so
4907  p = strrchr(buf, '/');
4908  if (p == NULL) return false;
4909  else *p = '\0';
4910
4911  // Get rid of client or server
4912  p = strrchr(buf, '/');
4913  if (p == NULL) return false;
4914  else *p = '\0';
4915
4916  // check xawt/libmawt.so
4917  strcpy(libmawtpath, buf);
4918  strcat(libmawtpath, xawtstr);
4919  if (::stat(libmawtpath, &statbuf) == 0) return false;
4920
4921  // check libawt_xawt.so
4922  strcpy(libmawtpath, buf);
4923  strcat(libmawtpath, new_xawtstr);
4924  if (::stat(libmawtpath, &statbuf) == 0) return false;
4925
4926  return true;
4927}
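
// Worked example (hypothetical path): if os::jvm_path() yields
//   /usr/java8_64/jre/lib/ppc64/server/libjvm.so
// the two truncation steps above leave "/usr/java8_64/jre/lib/ppc64",
// and we then stat()
//   /usr/java8_64/jre/lib/ppc64/xawt/libmawt.so  and
//   /usr/java8_64/jre/lib/ppc64/libawt_xawt.so,
// reporting a headless JRE only if neither library exists.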
4928
4929// Get the default path to the core file
4930// Returns the length of the string
4931int os::get_core_path(char* buffer, size_t bufferSize) {
4932  const char* p = get_current_directory(buffer, bufferSize);
4933
4934  if (p == NULL) {
4935    assert(p != NULL, "failed to get current directory");
4936    return 0;
4937  }
4938
4939  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
4940               p, current_process_id());
4941
4942  return strlen(buffer);
4943}
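
// Example result (hypothetical): with a current directory of "/home/user"
// and pid 1234, the buffer would read "/home/user/core or core.1234".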
4944
4945#ifndef PRODUCT
4946void TestReserveMemorySpecial_test() {
4947  // No tests available for this platform
4948}
4949#endif
4950