os_aix.cpp revision 7344:1d29b13e8a51
/*
 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS documentation, #pragma alloca must be used
// with the C++ compiler before referencing alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_aix.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

// Excerpts from systemcfg.h definitions newer than AIX 5.3
#ifndef PV_7
# define PV_7 0x200000          // Power PC 7
# define PV_7_Compat 0x208000   // Power PC 7
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are that codeptr_t is a *real code pointer*.
// This means that any function taking a codeptr_t argument assumes a real
// code pointer and will not handle function descriptors (e.g. getFuncName),
// whereas functions taking an address argument do deal with function
// descriptors (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
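
// Illustrative sketch (not the real declaration; see porting_aix.hpp for the
// FunctionDescriptor type actually used): under the AIX ABI a C function
// pointer does not point to code but to a function descriptor living in the
// data segment, roughly:
//
//   struct function_descriptor_sketch {
//     void* entry;        // the real code pointer (what codeptr_t means here)
//     void* toc;          // TOC anchor of the owning module
//     void* environment;  // environment pointer (unused by C/C++)
//   };
//
// This is why resolve_function_descriptor_to_code_pointer() below must peek
// into the data segment before treating an address as code.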

// typedefs for stackslots, stack pointers, pointers to op codes
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// returns true if function is a valid codepointer
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// macro to check a given stack pointer against given stack limits and to die if test fails
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// macro to check the current stack pointer against given stack limits
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
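
// Usage: both macros are used for sanity checking, e.g.
//   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
// as done in java_start() and os::create_attached_thread() below.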

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
size_t    os::Aix::_shm_default_page_size = -1;
int       os::Aix::_can_use_64K_pages = -1;
int       os::Aix::_can_use_16M_pages = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

////////////////////////////////////////////////////////////////////////////////
// environment support

bool os::getenv(const char* name, char* buf, int len) {
  const char* val = ::getenv(name);
  if (val != NULL && strlen(val) < (size_t)len) {
    strcpy(buf, val);
    return true;
  }
  if (len > 0) buf[0] = 0;  // return a null string
  return false;
}

// Return true if the process runs with special privileges, i.e. the real and
// effective user or group ids differ.
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x80000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
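
// Worked example (illustration only): with maxDisclaimSize = 2 GB, a 6 GB
// range yields numFullDisclaimsNeeded = 3 and lastDisclaimSize = 0 (three
// full disclaims, no remainder), while a 5 GB range yields two full
// disclaims plus one 1 GB remainder call.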

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif

// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }
}
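
// Usage sketch: passing the address of a local variable yields the page size
// backing the current thread's stack, e.g.
//   int dummy;
//   const size_t stack_ps = os::Aix::query_pagesize(&dummy);
// which is exactly how query_multipage_support() below derives _stack_page_size.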

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // get the number of online (logical) cpus instead of configured
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // retrieve total physical storage
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Retrieve information about multipage size support. Will initialize
// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
// Aix::_can_use_16M_pages.
// Must be called before calling os::large_page_init().
void os::Aix::query_multipage_support() {

  guarantee(_page_size == -1 &&
            _stack_page_size == -1 &&
            _can_use_64K_pages == -1 &&
            _can_use_16M_pages == -1 &&
            g_multipage_error == -1,
            "do not call twice");

  _page_size = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(_page_size == SIZE_4K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is influenced either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  size_t data_page_size = SIZE_4K;
  {
    void* p = os::malloc(SIZE_16M, mtInternal);
    guarantee(p != NULL, "malloc failed");
    data_page_size = os::Aix::query_pagesize(p);
    os::free(p);
  }

  // query default shm page size (LDR_CNTRL SHMPSIZE)
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    _shm_default_page_size = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons, so we may just as well guarantee it here.
  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");

  // query stack page size
  {
    int dummy = 0;
    _stack_page_size = os::Aix::query_pagesize(&dummy);
    // everything else would surprise me and should be looked into
    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
    // also, just for completeness: pthread stacks are allocated from C heap, so
    // stack page size should be the same as data page size
    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
  }

  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
  // for System V shm.
  if (Aix::extshm()) {
    if (Verbose) {
      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
                      "Please make sure EXTSHM is OFF for large page support.\n");
    }
    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
    _can_use_64K_pages = _can_use_16M_pages = 0;
    goto query_multipage_support_end;
  }

  // now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      if (Verbose) {
        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
        fprintf(stderr, "disabling multipage support.\n");
      }
      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
      _can_use_64K_pages = _can_use_16M_pages = 0;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    if (Verbose) {
      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
      for (int i = 0; i < num_psizes; i++) {
        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
      }
      fprintf(stderr, " .\n");
    }

    // Can we use 64K, 16M pages?
    _can_use_64K_pages = 0;
    _can_use_16M_pages = 0;
    for (int i = 0; i < num_psizes; i++) {
      if (sizes[i] == SIZE_64K) {
        _can_use_64K_pages = 1;
      } else if (sizes[i] == SIZE_16M) {
        _can_use_16M_pages = 1;
      }
    }

    if (!_can_use_64K_pages) {
      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
    }

    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
    // there must be an actual 16M page pool, and we must run with enough rights.
    if (_can_use_16M_pages) {
      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee(shmid != -1, "shmget failed");
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = SIZE_16M;
      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
      const int en = errno;
      ::shmctl(shmid, IPC_RMID, NULL);
      if (!can_set_pagesize) {
        if (Verbose) {
          fprintf(stderr, "Failed to allocate even one measly 16M page. shmctl failed with %d (%s).\n"
                          "Will deactivate 16M support.\n", en, strerror(en));
        }
        _can_use_16M_pages = 0;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  guarantee(_page_size != -1 &&
            _stack_page_size != -1 &&
            _can_use_64K_pages != -1 &&
            _can_use_16M_pages != -1, "Page sizes not properly initialized");

  if (_can_use_64K_pages) {
    g_multipage_error = 0;
  }

  if (Verbose) {
    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
    fprintf(stderr, "Can use 64K pages dynamically with shared memory: %s\n", (_can_use_64K_pages ? "yes" : "no"));
    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" : "no"));
    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
  }

} // end os::Aix::query_multipage_support()

// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX2((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR)); // extensions dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  return ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN);
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs.)
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage) {
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  }
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow the debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL.
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset(&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // Excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
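
// Note on units (illustration only): perfstat counts 4 KB pages, so e.g.
// psmt.real_total == 2097152 pages becomes 2097152 * 4096 bytes = 8 GB in
// pmi->real_total above.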

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset(&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy(pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is the kernel thread id (similar to a Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let thread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}
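
// Usage sketch of the TLS wrappers above (illustrative only):
//   int idx = os::allocate_thread_local_storage();  // wraps pthread_key_create()
//   os::thread_local_storage_at_put(idx, value);    // wraps pthread_setspecific()
//   os::free_thread_local_storage(idx);             // wraps pthread_key_delete()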

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

// For now, we say that AIX does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).

bool os::supports_vtime() { return false; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
double os::elapsedVTime() {
  // better than nothing, but not much
  return elapsedTime();
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (They probably only ever tested in C, not C++.)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
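
// Example (illustration only): tb_high = 3 and tb_low = 500000000 yield
// 3 * 1000000000 + 500000000 = 3500000000 ns, i.e. 3.5 seconds since the epoch.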

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {

  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // If you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_aix.hpp.

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
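
// Example (illustration only): dll_build_name(buf, buflen, "/a:/b", "jvm")
// probes "/a/libjvm.so", then "/b/libjvm.so", keeping the first path that
// exists; with an empty pname it simply produces "libjvm.so".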

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    return strcmp(lib->get_shortname(), "libjvm.so") == 0;
  }

  lib = LoadedLibraries::find_for_data_address(addr);
  if (lib) {
    return strcmp(lib->get_shortname(), "libjvm.so") == 0;
  }

  return false;
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // It's a real code pointer.
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // Pointer to data segment, potential function descriptor.
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // It's a function descriptor.
        return code_entry;
      }
    }
  }
  return NULL;
}
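
// Usage sketch (illustrative): since a C function pointer is a descriptor
// pointer on AIX, something like
//   resolve_function_descriptor_to_code_pointer((address)my_func_ptr)
// returns the code entry point of the function, whereas a real code address
// (e.g. a pc sampled from a thread context) is returned unchanged.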

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: function name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      // Note: %.*s expects an int precision, so cast the size_t down.
      sprintf(p_name, "%.*s", (int) namelen, lib->get_shortname());
    }
    return 0;
  }

  if (Verbose) {
    fprintf(stderr, "pc outside any module");
  }

  return -1;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as HotSpot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The library is loaded immediately
  // with all its dependencies.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

// This rationale (and the locking) was inherited from the Linux port:
// glibc-2.0 libdl is not MT safe. If you are building with any glibc,
// chances are you might want to run the generated bits against glibc-2.0
// libdl.so, so always use locking for any version of glibc. On AIX we
// keep the lock to stay on the safe side.
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}
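
// Usage sketch (illustrative): resolving a JNI entry point from a handle
// returned by os::dll_load:
//   void* f = os::dll_lookup(handle, "JNI_OnLoad");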

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  // Pass the fields through "%s" rather than using them as format strings.
  st->print("%s ", name.sysname);
  st->print("%s ", name.nodename);
  st->print("%s ", name.release);
  st->print("%s ", name.version);
  st->print("%s", name.machine);
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}

void os::print_memory_info(outputStream* st) {

  st->print_cr("Memory:");

  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default stack page size: %s", describe_pagesize(os::Aix::stack_page_size()));
  st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
  st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" : "no"));
  st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" : "no"));
  if (g_multipage_error != 0) {
    st->print_cr("  multipage error: %d", g_multipage_error);
  }

  // print out LDR_CNTRL because it affects the default page sizes
  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");

  const char* const extshm = ::getenv("EXTSHM");
  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");

  // Call os::Aix::get_meminfo() to retrieve memory statistics.
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    char buffer[256];
    if (os::Aix::on_aix()) {
      jio_snprintf(buffer, sizeof(buffer),
                   "  physical total : %llu\n"
                   "  physical free  : %llu\n"
                   "  swap total     : %llu\n"
                   "  swap free      : %llu\n",
                   mi.real_total,
                   mi.real_free,
                   mi.pgsp_total,
                   mi.pgsp_free);
    } else {
      Unimplemented();
    }
    st->print_raw(buffer);
  } else {
    st->print_cr("  (no more information available)");
  }
}

void os::pd_print_cpu_info(outputStream* st) {
  // cpu
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query the number of active processors after a crash.
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::cpu_features());
  st->cr();
}

void os::print_siginfo(outputStream* st, void* siginfo) {
  // Use common posix version.
  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
  st->cr();
}

static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS, buf, buflen);
  print_signal_handler(st, SIGFPE, buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL, buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  print_signal_handler(st, SIGDANGER, buf, buflen);
}
1614
1615static char saved_jvm_path[MAXPATHLEN] = {0};
1616
1617// Find the full path to the current module, libjvm.so or libjvm_g.so
1618void os::jvm_path(char *buf, jint buflen) {
1619  // Error checking.
1620  if (buflen < MAXPATHLEN) {
1621    assert(false, "must use a large-enough buffer");
1622    buf[0] = '\0';
1623    return;
1624  }
1625  // Lazy resolve the path to current module.
1626  if (saved_jvm_path[0] != 0) {
1627    strcpy(buf, saved_jvm_path);
1628    return;
1629  }
1630
1631  Dl_info dlinfo;
1632  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1633  assert(ret != 0, "cannot locate libjvm");
1634  char* rp = realpath((char *)dlinfo.dli_fname, buf);
1635  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1636
1637  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
1638  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
1639}
1640
1641void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1642  // no prefix required, not even "_"
1643}
1644
1645void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1646  // no suffix required
1647}
1648
1649////////////////////////////////////////////////////////////////////////////////
1650// sun.misc.Signal support
1651
1652static volatile jint sigint_count = 0;
1653
1654static void
1655UserHandler(int sig, void *siginfo, void *context) {
1656  // 4511530 - sem_post is serialized and handled by the manager thread. When
1657  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1658  // don't want to flood the manager thread with sem_post requests.
1659  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1660    return;
1661
1662  // Ctrl-C is pressed during error reporting, likely because the error
1663  // handler fails to abort. Let VM die immediately.
1664  if (sig == SIGINT && is_error_reported()) {
1665    os::die();
1666  }
1667
1668  os::signal_notify(sig);
1669}
1670
1671void* os::user_handler() {
1672  return CAST_FROM_FN_PTR(void*, UserHandler);
1673}
1674
1675extern "C" {
1676  typedef void (*sa_handler_t)(int);
1677  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1678}
1679
1680void* os::signal(int signal_number, void* handler) {
1681  struct sigaction sigAct, oldSigAct;
1682
1683  sigfillset(&(sigAct.sa_mask));
1684
1685  // Do not block out synchronous signals in the signal handler.
1686  // Blocking synchronous signals only makes sense if you can really
1687  // be sure that those signals won't happen during signal handling,
1688  // when the blocking applies.  Normal signal handlers are lean and
1689  // do not cause signals. But our signal handlers tend to be "risky"
1690  // - secondary SIGSEGVs, SIGILLs and SIGBUSes may and do happen.
1691  // On AIX/PASE there was a case where a SIGSEGV happened, followed
1692  // by a SIGILL, which was blocked due to the signal mask. The process
1693  // just hung forever. Better to crash from a secondary signal than to hang.
1694  sigdelset(&(sigAct.sa_mask), SIGSEGV);
1695  sigdelset(&(sigAct.sa_mask), SIGBUS);
1696  sigdelset(&(sigAct.sa_mask), SIGILL);
1697  sigdelset(&(sigAct.sa_mask), SIGFPE);
1698  sigdelset(&(sigAct.sa_mask), SIGTRAP);
1699
1700  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1701
1702  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1703
1704  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1705    // -1 means registration failed
1706    return (void *)-1;
1707  }
1708
1709  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1710}
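
// Illustrative usage sketch (hypothetical, not part of this file): the
// sun.misc.Signal machinery would install the UserHandler above roughly
// like this, keeping the previous disposition around:
//
//   void* old_handler = os::signal(SIGUSR1, os::user_handler());
//   if (old_handler == (void*)-1) {
//     // sigaction failed; the handler was not registered.
//   }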
1711
1712void os::signal_raise(int signal_number) {
1713  ::raise(signal_number);
1714}
1715
1716//
1717// The following code is moved from os.cpp for making this
1718// code platform specific, which it is by its very nature.
1719//
1720
1721// Will be modified when max signal is changed to be dynamic
1722int os::sigexitnum_pd() {
1723  return NSIG;
1724}
1725
1726// a counter for each possible signal value
1727static volatile jint pending_signals[NSIG+1] = { 0 };
1728
1729  // POSIX hand-shaking semaphore.
1730static sem_t sig_sem;
1731
1732void os::signal_init_pd() {
1733  // Initialize signal structures
1734  ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1735
1736  // Initialize signal semaphore
1737  int rc = ::sem_init(&sig_sem, 0, 0);
1738  guarantee(rc != -1, "sem_init failed");
1739}
1740
1741void os::signal_notify(int sig) {
1742  Atomic::inc(&pending_signals[sig]);
1743  ::sem_post(&sig_sem);
1744}
1745
1746static int check_pending_signals(bool wait) {
1747  Atomic::store(0, &sigint_count);
1748  for (;;) {
1749    for (int i = 0; i < NSIG + 1; i++) {
1750      jint n = pending_signals[i];
1751      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1752        return i;
1753      }
1754    }
1755    if (!wait) {
1756      return -1;
1757    }
1758    JavaThread *thread = JavaThread::current();
1759    ThreadBlockInVM tbivm(thread);
1760
1761    bool threadIsSuspended;
1762    do {
1763      thread->set_suspend_equivalent();
1764      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1765
1766      ::sem_wait(&sig_sem);
1767
1768      // were we externally suspended while we were waiting?
1769      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1770      if (threadIsSuspended) {
1771        //
1772        // The semaphore has been incremented, but while we were waiting
1773        // another thread suspended us. We don't want to continue running
1774        // while suspended because that would surprise the thread that
1775        // suspended us.
1776        //
1777        ::sem_post(&sig_sem);
1778
1779        thread->java_suspend_self();
1780      }
1781    } while (threadIsSuspended);
1782  }
1783}
1784
1785int os::signal_lookup() {
1786  return check_pending_signals(false);
1787}
1788
1789int os::signal_wait() {
1790  return check_pending_signals(true);
1791}
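
// Usage note (illustrative): the Java-level "Signal Dispatcher" thread is
// expected to loop on os::signal_wait(), which blocks on sig_sem until
// UserHandler posts it via os::signal_notify(), and then dispatches the
// returned signal number; os::signal_lookup() is the non-blocking variant
// and returns -1 when no signal is pending.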
1792
1793////////////////////////////////////////////////////////////////////////////////
1794// Virtual Memory
1795
1796// AddrRange describes an immutable address range
1797//
1798// This is a helper class for the 'shared memory bookkeeping' below.
1799class AddrRange {
1800  friend class ShmBkBlock;
1801
1802  char* _start;
1803  size_t _size;
1804
1805public:
1806
1807  AddrRange(char* start, size_t size)
1808    : _start(start), _size(size)
1809  {}
1810
1811  AddrRange(const AddrRange& r)
1812    : _start(r.start()), _size(r.size())
1813  {}
1814
1815  char* start() const { return _start; }
1816  size_t size() const { return _size; }
1817  char* end() const { return _start + _size; }
1818  bool is_empty() const { return _size == 0 ? true : false; }
1819
1820  static AddrRange empty_range() { return AddrRange(NULL, 0); }
1821
1822  bool contains(const char* p) const {
1823    return start() <= p && end() > p;
1824  }
1825
1826  bool contains(const AddrRange& range) const {
1827    return start() <= range.start() && end() >= range.end();
1828  }
1829
1830  bool intersects(const AddrRange& range) const {
1831    return (range.start() <= start() && range.end() > start()) ||
1832           (range.start() < end() && range.end() >= end()) ||
1833           contains(range);
1834  }
1835
1836  bool is_same_range(const AddrRange& range) const {
1837    return start() == range.start() && size() == range.size();
1838  }
1839
1840  // return the closest inside range consisting of whole pages
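  // Worked example (illustrative): for a range with _start == 0x1003 and
  // end() == 0x5000, and pagesize == 0x1000, 'from' aligns up to 0x2000 and
  // 'to' aligns down to 0x5000, so the result is [0x2000 .. 0x5000), i.e.
  // three whole pages.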
1841  AddrRange find_closest_aligned_range(size_t pagesize) const {
1842    if (pagesize == 0 || is_empty()) {
1843      return empty_range();
1844    }
1845    char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
1846    char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
1847    if (from > to) {
1848      return empty_range();
1849    }
1850    return AddrRange(from, to - from);
1851  }
1852};
1853
1854////////////////////////////////////////////////////////////////////////////
1855// shared memory bookkeeping
1856//
1857  // the os::reserve_memory() API and friends hand out different kinds of memory, depending
1858// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
1859//
1860// But these memory types have to be treated differently. For example, to uncommit
1861// mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
1862// disclaim64() is needed.
1863//
1864// Therefore we need to keep track of the allocated memory segments and their
1865// properties.
1866
1867// ShmBkBlock: base class for all blocks in the shared memory bookkeeping
1868class ShmBkBlock : public CHeapObj<mtInternal> {
1869
1870  ShmBkBlock* _next;
1871
1872protected:
1873
1874  AddrRange _range;
1875  const size_t _pagesize;
1876  const bool _pinned;
1877
1878public:
1879
1880  ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
1881    : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
1882
1883    assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
1884    assert(!_range.is_empty(), "invalid range");
1885  }
1886
1887  virtual void print(outputStream* st) const {
1888    st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
1889              _range.start(), _range.end(), _range.size(),
1890              _range.size() / _pagesize, describe_pagesize(_pagesize),
1891              _pinned ? "pinned" : "");
1892  }
1893
1894  enum Type { MMAP, SHMAT };
1895  virtual Type getType() = 0;
1896
1897  char* base() const { return _range.start(); }
1898  size_t size() const { return _range.size(); }
1899
1900  void setAddrRange(AddrRange range) {
1901    _range = range;
1902  }
1903
1904  bool containsAddress(const char* p) const {
1905    return _range.contains(p);
1906  }
1907
1908  bool containsRange(const char* p, size_t size) const {
1909    return _range.contains(AddrRange((char*)p, size));
1910  }
1911
1912  bool isSameRange(const char* p, size_t size) const {
1913    return _range.is_same_range(AddrRange((char*)p, size));
1914  }
1915
1916  virtual bool disclaim(char* p, size_t size) = 0;
1917  virtual bool release() = 0;
1918
1919  // blocks live in a list.
1920  ShmBkBlock* next() const { return _next; }
1921  void set_next(ShmBkBlock* blk) { _next = blk; }
1922
1923}; // end: ShmBkBlock
1924
1925
1926  // ShmBkMappedBlock: describes a block allocated with mmap()
1927class ShmBkMappedBlock : public ShmBkBlock {
1928public:
1929
1930  ShmBkMappedBlock(AddrRange range)
1931    : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
1932
1933  void print(outputStream* st) const {
1934    ShmBkBlock::print(st);
1935    st->print_cr(" - mmap'ed");
1936  }
1937
1938  Type getType() {
1939    return MMAP;
1940  }
1941
1942  bool disclaim(char* p, size_t size) {
1943
1944    AddrRange r(p, size);
1945
1946    guarantee(_range.contains(r), "invalid disclaim");
1947
1948    // only disclaim whole ranges.
1949    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
1950    if (r2.is_empty()) {
1951      return true;
1952    }
1953
1954    const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
1955
1956    if (rc != 0) {
1957      warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
1958    }
1959
1960    return rc == 0 ? true : false;
1961  }
1962
1963  bool release() {
1964    // mmap'ed blocks are released using munmap
1965    if (::munmap(_range.start(), _range.size()) != 0) {
1966      warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
1967      return false;
1968    }
1969    return true;
1970  }
1971}; // end: ShmBkMappedBlock
1972
1973  // ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
1974class ShmBkShmatedBlock : public ShmBkBlock {
1975public:
1976
1977  ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
1978    : ShmBkBlock(range, pagesize, pinned) {}
1979
1980  void print(outputStream* st) const {
1981    ShmBkBlock::print(st);
1982    st->print_cr(" - shmat'ed");
1983  }
1984
1985  Type getType() {
1986    return SHMAT;
1987  }
1988
1989  bool disclaim(char* p, size_t size) {
1990
1991    AddrRange r(p, size);
1992
1993    if (_pinned) {
1994      return true;
1995    }
1996
1997    // shmat'ed blocks are disclaimed using disclaim64
1998    guarantee(_range.contains(r), "invalid disclaim");
1999
2000    // only disclaim whole ranges.
2001    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
2002    if (r2.is_empty()) {
2003      return true;
2004    }
2005
2006    const bool rc = my_disclaim64(r2.start(), r2.size());
2007
2008    if (Verbose && !rc) {
2009      warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
2010    }
2011
2012    return rc;
2013  }
2014
2015  bool release() {
2016    bool rc = false;
2017    if (::shmdt(_range.start()) != 0) {
2018      warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
2019    } else {
2020      rc = true;
2021    }
2022    return rc;
2023  }
2024
2025}; // end: ShmBkShmatedBlock
2026
2027static ShmBkBlock* g_shmbk_list = NULL;
2028static volatile jint g_shmbk_table_lock = 0;
2029
2030// keep some usage statistics
2031static struct {
2032  int nodes;    // number of nodes in list
2033  size_t bytes; // reserved - not committed - bytes.
2034  int reserves; // how often reserve was called
2035  int lookups;  // how often a lookup was made
2036} g_shmbk_stats = { 0, 0, 0, 0 };
2037
2038// add information about a shared memory segment to the bookkeeping
2039static void shmbk_register(ShmBkBlock* p_block) {
2040  guarantee(p_block, "logic error");
2041  p_block->set_next(g_shmbk_list);
2042  g_shmbk_list = p_block;
2043  g_shmbk_stats.reserves ++;
2044  g_shmbk_stats.bytes += p_block->size();
2045  g_shmbk_stats.nodes ++;
2046}
2047
2048// remove information about a shared memory segment by its starting address
2049static void shmbk_unregister(ShmBkBlock* p_block) {
2050  ShmBkBlock* p = g_shmbk_list;
2051  ShmBkBlock* prev = NULL;
2052  while (p) {
2053    if (p == p_block) {
2054      if (prev) {
2055        prev->set_next(p->next());
2056      } else {
2057        g_shmbk_list = p->next();
2058      }
2059      g_shmbk_stats.nodes --;
2060      g_shmbk_stats.bytes -= p->size();
2061      return;
2062    }
2063    prev = p;
2064    p = p->next();
2065  }
2066  assert(false, "should not happen");
2067}
2068
2069  // Given a pointer, return the shared memory bookkeeping record for the segment it points into.
2070  // Using the returned block info must happen under lock protection.
2071static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
2072  g_shmbk_stats.lookups ++;
2073  ShmBkBlock* p = g_shmbk_list;
2074  while (p) {
2075    if (p->containsAddress(addr)) {
2076      return p;
2077    }
2078    p = p->next();
2079  }
2080  return NULL;
2081}
2082
2083// dump all information about all memory segments allocated with os::reserve_memory()
2084void shmbk_dump_info() {
2085  tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
2086    "total reserves: %d total lookups: %d)",
2087    g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
2088  const ShmBkBlock* p = g_shmbk_list;
2089  int i = 0;
2090  while (p) {
2091    p->print(tty);
2092    p = p->next();
2093    i ++;
2094  }
2095}
2096
2097#define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
2098#define UNLOCK_SHMBK   }
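
// Usage sketch (illustrative): the macro pair opens and closes a scope that
// holds a ThreadCritical lock, so bookkeeping accesses follow this pattern:
//
//   LOCK_SHMBK
//     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
//     // ... use block; even an early return releases the lock, because
//     // ~ThreadCritical runs when the scope is left ...
//   UNLOCK_SHMBK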
2099
2100// End: shared memory bookkeeping
2101////////////////////////////////////////////////////////////////////////////////////////////////////
2102
2103int os::vm_page_size() {
2104  // Seems redundant as all get out
2105  assert(os::Aix::page_size() != -1, "must call os::init");
2106  return os::Aix::page_size();
2107}
2108
2109// Aix allocates memory by pages.
2110int os::vm_allocation_granularity() {
2111  assert(os::Aix::page_size() != -1, "must call os::init");
2112  return os::Aix::page_size();
2113}
2114
2115int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
2116
2117  // Commit is a noop. There is no explicit commit
2118  // needed on AIX. Memory is committed when touched.
2119  //
2120  // Debug : check address range for validity
2121#ifdef ASSERT
2122  LOCK_SHMBK
2123    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2124    if (!block) {
2125      fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
2126      shmbk_dump_info();
2127      assert(false, "invalid pointer");
2128      return false;
2129    } else if (!block->containsRange(addr, size)) {
2130      fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
2131      shmbk_dump_info();
2132      assert(false, "invalid range");
2133      return false;
2134    }
2135  UNLOCK_SHMBK
2136#endif // ASSERT
2137
2138  return 0;
2139}
2140
2141bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2142  return os::Aix::commit_memory_impl(addr, size, exec) == 0;
2143}
2144
2145void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2146                                  const char* mesg) {
2147  assert(mesg != NULL, "mesg must be specified");
2148  os::Aix::commit_memory_impl(addr, size, exec);
2149}
2150
2151int os::Aix::commit_memory_impl(char* addr, size_t size,
2152                                size_t alignment_hint, bool exec) {
2153  return os::Aix::commit_memory_impl(addr, size, exec);
2154}
2155
2156bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2157                          bool exec) {
2158  return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2159}
2160
2161void os::pd_commit_memory_or_exit(char* addr, size_t size,
2162                                  size_t alignment_hint, bool exec,
2163                                  const char* mesg) {
2164  os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
2165}
2166
2167bool os::pd_uncommit_memory(char* addr, size_t size) {
2168
2169  // Delegate to ShmBkBlock class which knows how to uncommit its memory.
2170
2171  bool rc = false;
2172  LOCK_SHMBK
2173    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2174    if (!block) {
2175      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2176      shmbk_dump_info();
2177      assert(false, "invalid pointer");
2178      return false;
2179    } else if (!block->containsRange(addr, size)) {
2180      fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
2181      shmbk_dump_info();
2182      assert(false, "invalid range");
2183      return false;
2184    }
2185    rc = block->disclaim(addr, size);
2186  UNLOCK_SHMBK
2187
2188  if (Verbose && !rc) {
2189    warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
2190  }
2191  return rc;
2192}
2193
2194bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2195  return os::guard_memory(addr, size);
2196}
2197
2198bool os::remove_stack_guard_pages(char* addr, size_t size) {
2199  return os::unguard_memory(addr, size);
2200}
2201
2202void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2203}
2204
2205void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2206}
2207
2208void os::numa_make_global(char *addr, size_t bytes) {
2209}
2210
2211void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2212}
2213
2214bool os::numa_topology_changed() {
2215  return false;
2216}
2217
2218size_t os::numa_get_groups_num() {
2219  return 1;
2220}
2221
2222int os::numa_get_group_id() {
2223  return 0;
2224}
2225
2226size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2227  if (size > 0) {
2228    ids[0] = 0;
2229    return 1;
2230  }
2231  return 0;
2232}
2233
2234bool os::get_page_info(char *start, page_info* info) {
2235  return false;
2236}
2237
2238char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2239  return end;
2240}
2241
2242// Flags for reserve_shmatted_memory:
2243#define RESSHM_WISHADDR_OR_FAIL                     1
2244#define RESSHM_TRY_16M_PAGES                        2
2245#define RESSHM_16M_PAGES_OR_FAIL                    4
2246
2247// Result of reserve_shmatted_memory:
2248struct shmatted_memory_info_t {
2249  char* addr;
2250  size_t pagesize;
2251  bool pinned;
2252};
2253
2254// Reserve a section of shmatted memory.
2255// params:
2256// bytes [in]: size of memory, in bytes
2257// requested_addr [in]: wish address.
2258//                      NULL = no wish.
2259//                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
2260//                      be obtained, function will fail. Otherwise wish address is treated as hint and
2261//                      another pointer is returned.
2262// flags [in]:          some flags. Valid flags are:
2263//                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
2264//                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
2265//                          (requires UseLargePages and Use16MPages)
2266//                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
2267//                          Otherwise any other page size will do.
2268// p_info [out] :       holds information about the created shared memory segment.
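//
// Example call (illustrative): try a 16M-page allocation at any address, but
// tolerate falling back to another page size:
//
//   shmatted_memory_info_t info;
//   if (reserve_shmatted_memory(bytes, NULL, RESSHM_TRY_16M_PAGES, &info)) {
//     // info.addr, info.pagesize and info.pinned describe the new segment.
//   }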
2269static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
2270
2271  assert(p_info, "parameter error");
2272
2273  // init output struct.
2274  p_info->addr = NULL;
2275
2276  // We should not be here at all for EXTSHM=ON.
2277  if (os::Aix::extshm()) {
2278    ShouldNotReachHere();
2279  }
2280
2281  // extract flags. sanity checks.
2282  const bool wishaddr_or_fail =
2283    flags & RESSHM_WISHADDR_OR_FAIL;
2284  const bool try_16M_pages =
2285    flags & RESSHM_TRY_16M_PAGES;
2286  const bool f16M_pages_or_fail =
2287    flags & RESSHM_16M_PAGES_OR_FAIL;
2288
2289  // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
2290  // shmat will fail anyway, so save some cycles by failing right away
2291  if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
2292    if (wishaddr_or_fail) {
2293      return false;
2294    } else {
2295      requested_addr = NULL;
2296    }
2297  }
2298
2299  char* addr = NULL;
2300
2301  // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
2302  // pagesize dynamically.
2303  const size_t size = align_size_up(bytes, SIZE_16M);
2304
2305  // reserve the shared segment
2306  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2307  if (shmid == -1) {
2308    warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
2309    return false;
2310  }
2311
2312  // Important note:
2313  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2314  // Right after attaching it, we must remove it from the system. System V shm segments
2315  // are global and survive the process.
2316  // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
2317
2318  // try forcing the page size
2319  size_t pagesize = -1; // unknown so far
2320
2321  if (UseLargePages) {
2322
2323    struct shmid_ds shmbuf;
2324    memset(&shmbuf, 0, sizeof(shmbuf));
2325
2326    // First, try to take from 16M page pool if...
2327    if (os::Aix::can_use_16M_pages()  // we can ...
2328        && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
2329        && try_16M_pages) {           // caller wants us to.
2330      shmbuf.shm_pagesize = SIZE_16M;
2331      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2332        pagesize = SIZE_16M;
2333      } else {
2334        warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
2335                size / SIZE_16M, errno);
2336        if (f16M_pages_or_fail) {
2337          goto cleanup_shm;
2338        }
2339      }
2340    }
2341
2342    // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
2343    // because the 64K page pool may also be exhausted.
2344    if (pagesize == -1) {
2345      shmbuf.shm_pagesize = SIZE_64K;
2346      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2347        pagesize = SIZE_64K;
2348      } else {
2349        warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
2350                size / SIZE_64K, errno);
2351        // Here I give up. Leave pagesize at -1 - later, after attaching, we will query the
2352        // real page size of the attached memory. (In theory, it may be something different
2353        // from 4K if LDR_CNTRL SHM_PSIZE is set.)
2354      }
2355    }
2356  }
2357
2358  // sanity point
2359  assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
2360
2361  // Now attach the shared segment.
2362  addr = (char*) shmat(shmid, requested_addr, 0);
2363  if (addr == (char*)-1) {
2364    // How to handle attach failure:
2365    // If it failed for a specific wish address, tolerate this: in that case, if wish address was
2366    // mandatory, fail, if not, retry anywhere.
2367    // If it failed for any other reason, treat that as fatal error.
2368    addr = NULL;
2369    if (requested_addr) {
2370      if (wishaddr_or_fail) {
2371        goto cleanup_shm;
2372      } else {
2373        addr = (char*) shmat(shmid, NULL, 0);
2374        if (addr == (char*)-1) { // fatal
2375          addr = NULL;
2376          warning("shmat failed (errno: %d)", errno);
2377          goto cleanup_shm;
2378        }
2379      }
2380    } else { // fatal
2381      addr = NULL;
2382      warning("shmat failed (errno: %d)", errno);
2383      goto cleanup_shm;
2384    }
2385  }
2386
2387  // sanity point
2388  assert(addr && addr != (char*) -1, "wrong address");
2389
2390  // after successful Attach remove the segment - right away.
2391  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2392    warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2393    guarantee(false, "failed to remove shared memory segment!");
2394  }
2395  shmid = -1;
2396
2397  // query the real page size. In case setting the page size did not work (see above), the system
2398  // may have given us something other than 4K (LDR_CNTRL).
2399  {
2400    const size_t real_pagesize = os::Aix::query_pagesize(addr);
2401    if (pagesize != -1) {
2402      assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
2403    } else {
2404      pagesize = real_pagesize;
2405    }
2406  }
2407
2408  // Now register the reserved block with internal book keeping.
2409  LOCK_SHMBK
2410    const bool pinned = pagesize >= SIZE_16M ? true : false;
2411    ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
2412    assert(p_block, "");
2413    shmbk_register(p_block);
2414  UNLOCK_SHMBK
2415
2416cleanup_shm:
2417
2418  // if we have not done so yet, remove the shared memory segment. This is very important.
2419  if (shmid != -1) {
2420    if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2421      warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2422      guarantee(false, "failed to remove shared memory segment!");
2423    }
2424    shmid = -1;
2425  }
2426
2427  // trace
2428  if (Verbose && !addr) {
2429    if (requested_addr != NULL) {
2430      warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
2431    } else {
2432      warning("failed to shm-allocate 0x%llX bytes at any address.", size);
2433    }
2434  }
2435
2436  // hand info to caller
2437  if (addr) {
2438    p_info->addr = addr;
2439    p_info->pagesize = pagesize;
2440    p_info->pinned = pagesize == SIZE_16M ? true : false;
2441  }
2442
2443  // sanity test:
2444  if (requested_addr && addr && wishaddr_or_fail) {
2445    guarantee(addr == requested_addr, "shmat error");
2446  }
2447
2448  // just one more test to really make sure we have no dangling shm segments.
2449  guarantee(shmid == -1, "dangling shm segments");
2450
2451  return addr ? true : false;
2452
2453} // end: reserve_shmatted_memory
2454
2455  // Reserve memory using mmap. Behaves the same as reserve_shmatted_memory() with
2456  // respect to wish addresses; returns NULL in case of an error.
2457static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
2458
2459  // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2460  if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
2461    warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
2462    return NULL;
2463  }
2464
2465  const size_t size = align_size_up(bytes, SIZE_4K);
2466
2467  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2468  // msync(MS_INVALIDATE) (see os::uncommit_memory)
2469  int flags = MAP_ANONYMOUS | MAP_SHARED;
2470
2471  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2472  // it means if a wish address is given but MAP_FIXED is not set.
2473  //
2474  // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
2475  // clobbers the address range, which is probably not what the caller wants. That's
2476  // why I assert here (again) that the SPEC1170 compat mode is off.
2477  // If we want to be able to run under SPEC1170, we have to do some porting and
2478  // testing.
2479  if (requested_addr != NULL) {
2480    assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
2481    flags |= MAP_FIXED;
2482  }
2483
2484  char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2485
2486  if (addr == MAP_FAILED) {
2487    // attach failed: tolerate for specific wish addresses. Not being able to attach
2488    // anywhere is a fatal error.
2489    if (requested_addr == NULL) {
2490      // It's ok to fail here if the machine does not have enough memory.
2491      warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
2492    }
2493    addr = NULL;
2494    goto cleanup_mmap;
2495  }
2496
2497  // If we did request a specific address and that address was not available, fail.
2498  if (addr && requested_addr) {
2499    guarantee(addr == requested_addr, "unexpected");
2500  }
2501
2502  // register this mmap'ed segment with book keeping
2503  LOCK_SHMBK
2504    ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
2505    assert(p_block, "");
2506    shmbk_register(p_block);
2507  UNLOCK_SHMBK
2508
2509cleanup_mmap:
2510
2511  // trace
2512  if (Verbose) {
2513    if (addr) {
2514      fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
2515    }
2516    else {
2517      if (requested_addr != NULL) {
2518        warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
2519      } else {
2520        warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
2521      }
2522    }
2523  }
2524
2525  return addr;
2526
2527} // end: reserve_mmaped_memory
2528
2529// Reserves and attaches a shared memory segment.
2530// Will assert if a wish address is given and could not be obtained.
2531char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2532  return os::attempt_reserve_memory_at(bytes, requested_addr);
2533}
2534
2535bool os::pd_release_memory(char* addr, size_t size) {
2536
2537  // delegate to ShmBkBlock class which knows how to uncommit its memory.
2538
2539  bool rc = false;
2540  LOCK_SHMBK
2541    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2542    if (!block) {
2543      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2544      shmbk_dump_info();
2545      assert(false, "invalid pointer");
2546      return false;
2547    }
2548    else if (!block->isSameRange(addr, size)) {
2549      if (block->getType() == ShmBkBlock::MMAP) {
2550        // Release only the same range, or a part at the beginning or at the end of a range.
2551        if (block->base() == addr && size < block->size()) {
2552          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
2553          assert(b, "");
2554          shmbk_register(b);
2555          block->setAddrRange(AddrRange(addr, size));
2556        }
2557        else if (addr > block->base() && addr + size == block->base() + block->size()) {
2558          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
2559          assert(b, "");
2560          shmbk_register(b);
2561          block->setAddrRange(AddrRange(addr, size));
2562        }
2563        else {
2564          fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
2565          shmbk_dump_info();
2566          assert(false, "invalid mmap range");
2567          return false;
2568        }
2569      }
2570      else {
2571        // Release only the same range. No partial release allowed.
2572        // Soften the requirement a bit, because the caller may think it owns a smaller size
2573        // than the block actually has, due to alignment etc.
2574        if (block->base() != addr || block->size() < size) {
2575          fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
2576          shmbk_dump_info();
2577          assert(false, "invalid shmget range");
2578          return false;
2579        }
2580      }
2581    }
2582    rc = block->release();
2583    assert(rc, "release failed");
2584    // remove block from bookkeeping
2585    shmbk_unregister(block);
2586    delete block;
2587  UNLOCK_SHMBK
2588
2589  if (!rc) {
2590    warning("failed to released %lu bytes at 0x%p", size, addr);
2591  }
2592
2593  return rc;
2594}
2595
2596static bool checked_mprotect(char* addr, size_t size, int prot) {
2597
2598  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2599  // not tell me if protection failed when trying to protect an un-protectable range.
2600  //
2601  // This means if the memory was allocated using shmget/shmat, protection won't work
2602  // but mprotect will still return 0:
2603  //
2604  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2605
2606  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2607
2608  if (!rc) {
2609    const char* const s_errno = strerror(errno);
2610    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2611    return false;
2612  }
2613
2614  // mprotect success check
2615  //
2616  // Mprotect said it changed the protection but can I believe it?
2617  //
2618  // To be sure I need to check the protection afterwards. Try to
2619  // read from protected memory and check whether that causes a segfault.
2620  //
2621  if (!os::Aix::xpg_sus_mode()) {
2622
2623    if (StubRoutines::SafeFetch32_stub()) {
2624
2625      const bool read_protected =
2626        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2627         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2628
2629      if (prot & PROT_READ) {
2630        rc = !read_protected;
2631      } else {
2632        rc = read_protected;
2633      }
2634    }
2635  }
2636  if (!rc) {
2637    assert(false, "mprotect failed.");
2638  }
2639  return rc;
2640}
2641
2642// Set protections specified
2643bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2644  unsigned int p = 0;
2645  switch (prot) {
2646  case MEM_PROT_NONE: p = PROT_NONE; break;
2647  case MEM_PROT_READ: p = PROT_READ; break;
2648  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2649  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2650  default:
2651    ShouldNotReachHere();
2652  }
2653  // is_committed is unused.
2654  return checked_mprotect(addr, size, p);
2655}
2656
2657bool os::guard_memory(char* addr, size_t size) {
2658  return checked_mprotect(addr, size, PROT_NONE);
2659}
2660
2661bool os::unguard_memory(char* addr, size_t size) {
2662  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2663}
2664
2665// Large page support
2666
2667static size_t _large_page_size = 0;
2668
2669// Enable large page support if OS allows that.
2670void os::large_page_init() {
2671
2672  // Note: os::Aix::query_multipage_support must run first.
2673
2674  if (!UseLargePages) {
2675    return;
2676  }
2677
2678  if (!Aix::can_use_64K_pages()) {
2679    assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
2680    UseLargePages = false;
2681    return;
2682  }
2683
2684  if (!Aix::can_use_16M_pages() && Use16MPages) {
2685    fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
2686            " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
2687  }
2688
2689  // Do not report 16M page alignment as part of os::_page_sizes if we are
2690  // explicitly forbidden from using 16M pages. Doing so would increase the
2691  // alignment the garbage collector calculates with, slightly increasing
2692  // heap usage. We should only pay for 16M alignment if we really want to
2693  // use 16M pages.
2694  if (Use16MPages && Aix::can_use_16M_pages()) {
2695    _large_page_size = SIZE_16M;
2696    _page_sizes[0] = SIZE_16M;
2697    _page_sizes[1] = SIZE_64K;
2698    _page_sizes[2] = SIZE_4K;
2699    _page_sizes[3] = 0;
2700  } else if (Aix::can_use_64K_pages()) {
2701    _large_page_size = SIZE_64K;
2702    _page_sizes[0] = SIZE_64K;
2703    _page_sizes[1] = SIZE_4K;
2704    _page_sizes[2] = 0;
2705  }
2706
2707  if (Verbose) {
2708    ("Default large page size is 0x%llX.", _large_page_size);
2709  }
2710} // end: os::large_page_init()
2711
2712char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2713  // "exec" is passed in but not used. Creating the shared image for
2714  // the code cache doesn't have an SHM_X executable permission to check.
2715  Unimplemented();
2716  return 0;
2717}
2718
2719bool os::release_memory_special(char* base, size_t bytes) {
2720  // detaching the SHM segment will also delete it, see reserve_memory_special()
2721  Unimplemented();
2722  return false;
2723}
2724
2725size_t os::large_page_size() {
2726  return _large_page_size;
2727}
2728
2729bool os::can_commit_large_page_memory() {
2730  // Well, sadly we cannot commit anything at all (see comment in
2731  // os::commit_memory) but we claim to so we can make use of large pages
2732  return true;
2733}
2734
2735bool os::can_execute_large_page_memory() {
2736  // We can do that
2737  return true;
2738}
2739
2740// Reserve memory at an arbitrary address, only if that area is
2741// available (and not reserved for something else).
2742char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2743
2744  bool use_mmap = false;
2745
2746  // mmap: smaller graining, no large page support
2747  // shm: large graining (256M), large page support, limited number of shm segments
2748  //
2749  // Prefer mmap wherever we either do not need large page support or have OS limits
2750
2751  if (!UseLargePages || bytes < SIZE_16M) {
2752    use_mmap = true;
2753  }
2754
2755  char* addr = NULL;
2756  if (use_mmap) {
2757    addr = reserve_mmaped_memory(bytes, requested_addr);
2758  } else {
2759    // shmat: wish address is mandatory, and do not try 16M pages here.
2760    shmatted_memory_info_t info;
2761    const int flags = RESSHM_WISHADDR_OR_FAIL;
2762    if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
2763      addr = info.addr;
2764    }
2765  }
2766
2767  return addr;
2768}
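
// Decision sketch (illustrative): with -XX:+UseLargePages, a 32M request
// (>= SIZE_16M) takes the shmat path above so that it can be backed by large
// pages, while a 4M request - or any request with large pages disabled -
// takes the mmap path with its finer 4K graining.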
2769
2770size_t os::read(int fd, void *buf, unsigned int nBytes) {
2771  return ::read(fd, buf, nBytes);
2772}
2773
2774size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
2775  return ::pread(fd, buf, nBytes, offset);
2776}
2777
2778void os::naked_short_sleep(jlong ms) {
2779  struct timespec req;
2780
2781  assert(ms < 1000, "Un-interruptible sleep, short time use only");
2782  req.tv_sec = 0;
2783  if (ms > 0) {
2784    req.tv_nsec = (ms % 1000) * 1000000;
2785  }
2786  else {
2787    req.tv_nsec = 1;
2788  }
2789
2790  nanosleep(&req, NULL);
2791
2792  return;
2793}
2794
2795// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2796void os::infinite_sleep() {
2797  while (true) {    // sleep forever ...
2798    ::sleep(100);   // ... 100 seconds at a time
2799  }
2800}
2801
2802// Used to convert frequent JVM_Yield() to nops
2803bool os::dont_yield() {
2804  return DontYieldALot;
2805}
2806
2807void os::naked_yield() {
2808  sched_yield();
2809}
2810
2811////////////////////////////////////////////////////////////////////////////////
2812// thread priority support
2813
2814// From AIX manpage to pthread_setschedparam
2815// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2816//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2817//
2818// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2819// range from 40 to 80, where 40 is the least favored priority and 80
2820// is the most favored."
2821//
2822// (Actually, I doubt this even has an impact on AIX, as we do kernel
2823// scheduling there; however, this still leaves iSeries.)
2824//
2825// We use the same values for AIX and PASE.
2826int os::java_to_os_priority[CriticalPriority + 1] = {
2827  54,             // 0 Entry should never be used
2828
2829  55,             // 1 MinPriority
2830  55,             // 2
2831  56,             // 3
2832
2833  56,             // 4
2834  57,             // 5 NormPriority
2835  57,             // 6
2836
2837  58,             // 7
2838  58,             // 8
2839  59,             // 9 NearMaxPriority
2840
2841  60,             // 10 MaxPriority
2842
2843  60              // 11 CriticalPriority
2844};
2845
2846OSReturn os::set_native_priority(Thread* thread, int newpri) {
2847  if (!UseThreadPriorities) return OS_OK;
2848  pthread_t thr = thread->osthread()->pthread_id();
2849  int policy = SCHED_OTHER;
2850  struct sched_param param;
2851  param.sched_priority = newpri;
2852  int ret = pthread_setschedparam(thr, policy, &param);
2853
2854  if (Verbose) {
2855    if (ret == 0) {
2856      fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
2857    } else {
2858      fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
2859              (int)thr, newpri, ret, strerror(ret));
2860    }
2861  }
2862  return (ret == 0) ? OS_OK : OS_ERR;
2863}
2864
2865OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2866  if (!UseThreadPriorities) {
2867    *priority_ptr = java_to_os_priority[NormPriority];
2868    return OS_OK;
2869  }
2870  pthread_t thr = thread->osthread()->pthread_id();
2871  int policy = SCHED_OTHER;
2872  struct sched_param param;
2873  int ret = pthread_getschedparam(thr, &policy, &param);
2874  *priority_ptr = param.sched_priority;
2875
2876  return (ret == 0) ? OS_OK : OS_ERR;
2877}
2878
2879// Hint to the underlying OS that a task switch would not be good.
2880// Void return because it's a hint and can fail.
2881void os::hint_no_preempt() {}
2882
2883////////////////////////////////////////////////////////////////////////////////
2884// suspend/resume support
2885
2886//  the low-level signal-based suspend/resume support is a remnant from the
2887//  old VM-suspension that used to be for java-suspension, safepoints etc,
2888//  within hotspot. Now there is a single use-case for this:
2889//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2890//      that runs in the watcher thread.
2891//  The remaining code is greatly simplified from the more general suspension
2892//  code that used to be used.
2893//
2894//  The protocol is quite simple:
2895//  - suspend:
2896//      - sends a signal to the target thread
2897//      - polls the suspend state of the osthread using a yield loop
2898//      - target thread signal handler (SR_handler) sets suspend state
2899//        and blocks in sigsuspend until continued
2900//  - resume:
2901//      - sets target osthread state to continue
2902//      - sends signal to end the sigsuspend loop in the SR_handler
2903//
2904//  Note that the SR_lock plays no role in this suspend/resume protocol.
2905//
2906
2907static void resume_clear_context(OSThread *osthread) {
2908  osthread->set_ucontext(NULL);
2909  osthread->set_siginfo(NULL);
2910}
2911
2912static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2913  osthread->set_ucontext(context);
2914  osthread->set_siginfo(siginfo);
2915}
2916
2917//
2918// Handler function invoked when a thread's execution is suspended or
2919// resumed. We have to be careful that only async-safe functions are
2920// called here (Note: most pthread functions are not async safe and
2921// should be avoided.)
2922//
2923// Note: sigwait() is a more natural fit than sigsuspend() from an
2924// interface point of view, but sigwait() prevents the signal handler
2925// from being run. libpthread would get very confused by not having
2926// its signal handlers run and prevents sigwait()'s use with the
2927// mutex-granting signal.
2928//
2929// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2930//
2931static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2932  // Save and restore errno to avoid confusing native code with EINTR
2933  // after sigsuspend.
2934  int old_errno = errno;
2935
2936  Thread* thread = Thread::current();
2937  OSThread* osthread = thread->osthread();
2938  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2939
2940  os::SuspendResume::State current = osthread->sr.state();
2941  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2942    suspend_save_context(osthread, siginfo, context);
2943
2944    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2945    os::SuspendResume::State state = osthread->sr.suspended();
2946    if (state == os::SuspendResume::SR_SUSPENDED) {
2947      sigset_t suspend_set;  // signals for sigsuspend()
2948
2949      // get current set of blocked signals and unblock resume signal
2950      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2951      sigdelset(&suspend_set, SR_signum);
2952
2953      // wait here until we are resumed
2954      while (1) {
2955        sigsuspend(&suspend_set);
2956
2957        os::SuspendResume::State result = osthread->sr.running();
2958        if (result == os::SuspendResume::SR_RUNNING) {
2959          break;
2960        }
2961      }
2962
2963    } else if (state == os::SuspendResume::SR_RUNNING) {
2964      // request was cancelled, continue
2965    } else {
2966      ShouldNotReachHere();
2967    }
2968
2969    resume_clear_context(osthread);
2970  } else if (current == os::SuspendResume::SR_RUNNING) {
2971    // request was cancelled, continue
2972  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2973    // ignore
2974  } else {
2975    ShouldNotReachHere();
2976  }
2977
2978  errno = old_errno;
2979}
2980
2981
2982static int SR_initialize() {
2983  struct sigaction act;
2984  char *s;
2985  // Get signal number to use for suspend/resume
2986  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2987    int sig = ::strtol(s, 0, 10);
2988    if (sig > 0 && sig < NSIG) {
2989      SR_signum = sig;
2990    }
2991  }
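  // Example (illustrative): starting the VM with _JAVA_SR_SIGNUM=50 in the
  // environment makes it use signal 50 for suspend/resume instead of the
  // default SR_signum.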
2992
2993  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2994        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2995
2996  sigemptyset(&SR_sigset);
2997  sigaddset(&SR_sigset, SR_signum);
2998
2999  // Set up signal handler for suspend/resume.
3000  act.sa_flags = SA_RESTART|SA_SIGINFO;
3001  act.sa_handler = (void (*)(int)) SR_handler;
3002
3003  // SR_signum is blocked by default.
3004  // 4528190 - We also need to block the pthread restart signal (32 on all
3005  // supported Linux platforms). Note that LinuxThreads needs to block
3006  // this signal for all threads to work properly. So we don't have
3007  // to use a hard-coded signal number when setting up the mask.
3008  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
3009
3010  if (sigaction(SR_signum, &act, 0) == -1) {
3011    return -1;
3012  }
3013
3014  // Save signal flag
3015  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
3016  return 0;
3017}
3018
3019static int SR_finalize() {
3020  return 0;
3021}
3022
3023static int sr_notify(OSThread* osthread) {
3024  int status = pthread_kill(osthread->pthread_id(), SR_signum);
3025  assert_status(status == 0, status, "pthread_kill");
3026  return status;
3027}
3028
3029// "Randomly" selected value for how long we want to spin
3030// before bailing out on suspending a thread, also how often
3031// we send a signal to a thread we want to resume
3032static const int RANDOMLY_LARGE_INTEGER = 1000000;
3033static const int RANDOMLY_LARGE_INTEGER2 = 100;
3034
3035// returns true on success and false on error - really an error is fatal
3036// but this seems the normal response to library errors
3037static bool do_suspend(OSThread* osthread) {
3038  assert(osthread->sr.is_running(), "thread should be running");
3039  // mark as suspended and send signal
3040
3041  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3042    // failed to switch, state wasn't running?
3043    ShouldNotReachHere();
3044    return false;
3045  }
3046
3047  if (sr_notify(osthread) != 0) {
3048    // try to cancel, switch to running
3049
3050    os::SuspendResume::State result = osthread->sr.cancel_suspend();
3051    if (result == os::SuspendResume::SR_RUNNING) {
3052      // cancelled
3053      return false;
3054    } else if (result == os::SuspendResume::SR_SUSPENDED) {
3055      // somehow managed to suspend
3056      return true;
3057    } else {
3058      ShouldNotReachHere();
3059      return false;
3060    }
3061  }
3062
3063  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3064
3065  for (int n = 0; !osthread->sr.is_suspended(); n++) {
3066    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
3067      os::naked_yield();
3068    }
3069
3070    // timeout, try to cancel the request
3071    if (n >= RANDOMLY_LARGE_INTEGER) {
3072      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3073      if (cancelled == os::SuspendResume::SR_RUNNING) {
3074        return false;
3075      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3076        return true;
3077      } else {
3078        ShouldNotReachHere();
3079        return false;
3080      }
3081    }
3082  }
3083
3084  guarantee(osthread->sr.is_suspended(), "Must be suspended");
3085  return true;
3086}
3087
3088static void do_resume(OSThread* osthread) {
3089  //assert(osthread->sr.is_suspended(), "thread should be suspended");
3090
3091  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3092    // failed to switch to WAKEUP_REQUEST
3093    ShouldNotReachHere();
3094    return;
3095  }
3096
3097  while (!osthread->sr.is_running()) {
3098    if (sr_notify(osthread) == 0) {
3099      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
3100        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
3101          os::naked_yield();
3102        }
3103      }
3104    } else {
3105      ShouldNotReachHere();
3106    }
3107  }
3108
3109  guarantee(osthread->sr.is_running(), "Must be running!");
3110}
3111
3112///////////////////////////////////////////////////////////////////////////////////
3113// signal handling (except suspend/resume)
3114
3115// This routine may be used by user applications as a "hook" to catch signals.
3116// The user-defined signal handler must pass unrecognized signals to this
3117// routine, and if it returns true (non-zero), then the signal handler must
3118// return immediately. If the flag "abort_if_unrecognized" is true, then this
3119// routine will never return false (zero), but instead will execute a VM panic
3120// routine to kill the process.
3121//
3122// If this routine returns false, it is OK to call it again. This allows
3123// the user-defined signal handler to perform checks either before or after
3124// the VM performs its own checks. Naturally, the user code would be making
3125// a serious error if it tried to handle an exception (such as a null check
3126// or breakpoint) that the VM was generating for its own correct operation.
3127//
3128// This routine may recognize any of the following kinds of signals:
3129//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
3130// It should be consulted by handlers for any of those signals.
3131//
3132// The caller of this routine must pass in the three arguments supplied
3133// to the function referred to in the "sa_sigaction" (not the "sa_handler")
3134// field of the structure passed to sigaction(). This routine assumes that
3135// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3136//
3137// Note that the VM will print warnings if it detects conflicting signal
3138// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3139//
3140extern "C" JNIEXPORT int
3141JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
3142
3143// Set thread signal mask (for some reason on AIX sigthreadmask() seems
3144// to be the thing to call; documentation is not terribly clear about whether
3145// pthread_sigmask also works, and if it does, whether it does the same).
3146bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
3147  const int rc = ::pthread_sigmask(how, set, oset);
3148  // return value semantics differ slightly for error case:
3149  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
3150  // (so, pthread_sigmask is more thread-safe for error handling)
3151  // But success is always 0.
3152  return rc == 0 ? true : false;
3153}
3154
3155// Function to unblock all signals which are, according
3156// to POSIX, typical program error signals. If they happen while being blocked,
3157// they typically will bring down the process immediately.
3158bool unblock_program_error_signals() {
3159  sigset_t set;
3160  ::sigemptyset(&set);
3161  ::sigaddset(&set, SIGILL);
3162  ::sigaddset(&set, SIGBUS);
3163  ::sigaddset(&set, SIGFPE);
3164  ::sigaddset(&set, SIGSEGV);
3165  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3166}
3167
3168// Renamed from 'signalHandler' to avoid collision with other shared libs.
3169void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
3170  assert(info != NULL && uc != NULL, "it must be old kernel");
3171
3172  // Never leave program error signals blocked;
3173  // on all our platforms they would bring down the process immediately when
3174  // getting raised while being blocked.
3175  unblock_program_error_signals();
3176
3177  JVM_handle_aix_signal(sig, info, uc, true);
3178}
3179
3180
3181// This boolean allows users to forward their own non-matching signals
3182// to JVM_handle_aix_signal, harmlessly.
3183bool os::Aix::signal_handlers_are_installed = false;
3184
3185// For signal-chaining
3186struct sigaction os::Aix::sigact[MAXSIGNUM];
3187unsigned int os::Aix::sigs = 0;
3188bool os::Aix::libjsig_is_loaded = false;
3189typedef struct sigaction *(*get_signal_t)(int);
3190get_signal_t os::Aix::get_signal_action = NULL;
3191
3192struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3193  struct sigaction *actp = NULL;
3194
3195  if (libjsig_is_loaded) {
3196    // Retrieve the old signal handler from libjsig
3197    actp = (*get_signal_action)(sig);
3198  }
3199  if (actp == NULL) {
3200    // Retrieve the preinstalled signal handler from jvm
3201    actp = get_preinstalled_handler(sig);
3202  }
3203
3204  return actp;
3205}
3206
3207static bool call_chained_handler(struct sigaction *actp, int sig,
3208                                 siginfo_t *siginfo, void *context) {
3209  // Call the old signal handler
3210  if (actp->sa_handler == SIG_DFL) {
3211    // It's more reasonable to let the JVM treat it as an unexpected exception
3212    // instead of taking the default action.
3213    return false;
3214  } else if (actp->sa_handler != SIG_IGN) {
3215    if ((actp->sa_flags & SA_NODEFER) == 0) {
3216      // automatically block the signal
3217      sigaddset(&(actp->sa_mask), sig);
3218    }
3219
3220    sa_handler_t hand = NULL;
3221    sa_sigaction_t sa = NULL;
3222    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3223    // retrieve the chained handler
3224    if (siginfo_flag_set) {
3225      sa = actp->sa_sigaction;
3226    } else {
3227      hand = actp->sa_handler;
3228    }
3229
3230    if ((actp->sa_flags & SA_RESETHAND) != 0) {
3231      actp->sa_handler = SIG_DFL;
3232    }
3233
3234    // try to honor the signal mask
3235    sigset_t oset;
3236    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3237
3238    // call into the chained handler
3239    if (siginfo_flag_set) {
3240      (*sa)(sig, siginfo, context);
3241    } else {
3242      (*hand)(sig);
3243    }
3244
3245    // restore the signal mask
3246    pthread_sigmask(SIG_SETMASK, &oset, 0);
3247  }
3248  // Tell jvm's signal handler the signal is taken care of.
3249  return true;
3250}
3251
3252bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3253  bool chained = false;
3254  // signal-chaining
3255  if (UseSignalChaining) {
3256    struct sigaction *actp = get_chained_signal_action(sig);
3257    if (actp != NULL) {
3258      chained = call_chained_handler(actp, sig, siginfo, context);
3259    }
3260  }
3261  return chained;
3262}
3263
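// 'sigs' is a bitmask: bit n is set iff a pre-installed handler was saved
// for signal n (see save_preinstalled_handler below).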
3264struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3265  if ((((unsigned int)1 << sig) & sigs) != 0) {
3266    return &sigact[sig];
3267  }
3268  return NULL;
3269}
3270
3271void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3272  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3273  sigact[sig] = oldAct;
3274  sigs |= (unsigned int)1 << sig;
3275}
3276
3277// for diagnostic
3278int os::Aix::sigflags[MAXSIGNUM];
3279
3280int os::Aix::get_our_sigflags(int sig) {
3281  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3282  return sigflags[sig];
3283}
3284
3285void os::Aix::set_our_sigflags(int sig, int flags) {
3286  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3287  sigflags[sig] = flags;
3288}
3289
3290void os::Aix::set_signal_handler(int sig, bool set_installed) {
3291  // Check for overwrite.
3292  struct sigaction oldAct;
3293  sigaction(sig, (struct sigaction*)NULL, &oldAct);
3294
3295  void* oldhand = oldAct.sa_sigaction
3296    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3297    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3298  // Renamed 'signalHandler' to avoid collision with other shared libs.
3299  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3300      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3301      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3302    if (AllowUserSignalHandlers || !set_installed) {
3303      // Do not overwrite; user takes responsibility to forward to us.
3304      return;
3305    } else if (UseSignalChaining) {
3306      // save the old handler in jvm
3307      save_preinstalled_handler(sig, oldAct);
3308      // libjsig also interposes the sigaction() call below and saves the
3309      // old sigaction on its own.
3310    } else {
3311      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3312                    "%#lx for signal %d.", (long)oldhand, sig));
3313    }
3314  }
3315
3316  struct sigaction sigAct;
3317  sigfillset(&(sigAct.sa_mask));
3318  if (!set_installed) {
3319    sigAct.sa_handler = SIG_DFL;
3320    sigAct.sa_flags = SA_RESTART;
3321  } else {
3322    // Renamed 'signalHandler' to avoid collision with other shared libs.
3323    sigAct.sa_sigaction = javaSignalHandler;
3324    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3325  }
3326  // Save the flags we are about to set, for later diagnostics.
3327  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3328  sigflags[sig] = sigAct.sa_flags;
3329
3330  int ret = sigaction(sig, &sigAct, &oldAct);
3331  assert(ret == 0, "check");
3332
3333  void* oldhand2 = oldAct.sa_sigaction
3334                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3335                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3336  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3337}
3338
3339// install signal handlers for signals that HotSpot needs to
3340// handle in order to support Java-level exception handling.
3341void os::Aix::install_signal_handlers() {
3342  if (!signal_handlers_are_installed) {
3343    signal_handlers_are_installed = true;
3344
3345    // signal-chaining
3346    typedef void (*signal_setting_t)();
3347    signal_setting_t begin_signal_setting = NULL;
3348    signal_setting_t end_signal_setting = NULL;
3349    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3350                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3351    if (begin_signal_setting != NULL) {
3352      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3353                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3354      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3355                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3356      libjsig_is_loaded = true;
3357      assert(UseSignalChaining, "should enable signal-chaining");
3358    }
3359    if (libjsig_is_loaded) {
3360      // Tell libjsig that the jvm is setting signal handlers.
3361      (*begin_signal_setting)();
3362    }
3363
3364    set_signal_handler(SIGSEGV, true);
3365    set_signal_handler(SIGPIPE, true);
3366    set_signal_handler(SIGBUS, true);
3367    set_signal_handler(SIGILL, true);
3368    set_signal_handler(SIGFPE, true);
3369    set_signal_handler(SIGTRAP, true);
3370    set_signal_handler(SIGXFSZ, true);
3371    set_signal_handler(SIGDANGER, true);
3372
3373    if (libjsig_is_loaded) {
3374      // Tell libjsig that the jvm finished setting signal handlers.
3375      (*end_signal_setting)();
3376    }
3377
3378    // We don't activate the signal checker if libjsig is in place; we trust
3379    // ourselves, and if a UserSignalHandler is installed all bets are off.
3380    // Log that signal checking is off only if -verbose:jni is specified.
3381    if (CheckJNICalls) {
3382      if (libjsig_is_loaded) {
3383        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3384        check_signals = false;
3385      }
3386      if (AllowUserSignalHandlers) {
3387        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3388        check_signals = false;
3389      }
3390      // need to initialize check_signal_done
3391      ::sigemptyset(&check_signal_done);
3392    }
3393  }
3394}
3395
3396static const char* get_signal_handler_name(address handler,
3397                                           char* buf, int buflen) {
3398  int offset;
3399  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3400  if (found) {
3401    // skip directory names
3402    const char *p1, *p2;
3403    p1 = buf;
3404    size_t len = strlen(os::file_separator());
3405    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3406    // The way os::dll_address_to_library_name is implemented on Aix
3407    // right now, it always returns -1 for the offset, which is not
3408    // terribly informative.
3409    // Will fix that. For now, omit the offset.
3410    jio_snprintf(buf, buflen, "%s", p1);
3411  } else {
3412    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3413  }
3414  return buf;
3415}
3416
3417static void print_signal_handler(outputStream* st, int sig,
3418                                 char* buf, size_t buflen) {
3419  struct sigaction sa;
3420  sigaction(sig, NULL, &sa);
3421
3422  st->print("%s: ", os::exception_name(sig, buf, buflen));
3423
3424  address handler = (sa.sa_flags & SA_SIGINFO)
3425    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3426    : CAST_FROM_FN_PTR(address, sa.sa_handler);
3427
3428  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3429    st->print("SIG_DFL");
3430  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3431    st->print("SIG_IGN");
3432  } else {
3433    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3434  }
3435
3436  // Print readable mask.
3437  st->print(", sa_mask[0]=");
3438  os::Posix::print_signal_set_short(st, &sa.sa_mask);
3439
3440  address rh = VMError::get_resetted_sighandler(sig);
3441  // Maybe the handler was reset by VMError?
3442  if (rh != NULL) {
3443    handler = rh;
3444    sa.sa_flags = VMError::get_resetted_sigflags(sig);
3445  }
3446
3447  // Print textual representation of sa_flags.
3448  st->print(", sa_flags=");
3449  os::Posix::print_sa_flags(st, sa.sa_flags);
3450
3451  // Check: is it our handler?
3452  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3453      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3454    // It is our signal handler.
3455    // Check the flags; warn if they no longer match what we installed.
3456    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3457      st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
3458                os::Aix::get_our_sigflags(sig));
3459    }
3460  }
3461  st->cr();
3462}
3463
3464
3465#define DO_SIGNAL_CHECK(sig) \
3466  if (!sigismember(&check_signal_done, sig)) \
3467    os::Aix::check_signal_handler(sig)
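
// For example, DO_SIGNAL_CHECK(SIGSEGV) expands to
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV)
// i.e. each signal is re-checked periodically until it has been flagged in
// check_signal_done.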
3468
3469// This method is a periodic task to check for misbehaving JNI applications
3470// under CheckJNI; we can add any periodic checks here.
3471
3472void os::run_periodic_checks() {
3473
3474  if (check_signals == false) return;
3475
3476  // SEGV and BUS, if overridden, could potentially prevent the
3477  // generation of hs*.log in the event of a crash, and debugging
3478  // such a case can be very challenging, so we absolutely
3479  // check the following for good measure:
3480  DO_SIGNAL_CHECK(SIGSEGV);
3481  DO_SIGNAL_CHECK(SIGILL);
3482  DO_SIGNAL_CHECK(SIGFPE);
3483  DO_SIGNAL_CHECK(SIGBUS);
3484  DO_SIGNAL_CHECK(SIGPIPE);
3485  DO_SIGNAL_CHECK(SIGXFSZ);
3486  if (UseSIGTRAP) {
3487    DO_SIGNAL_CHECK(SIGTRAP);
3488  }
3489  DO_SIGNAL_CHECK(SIGDANGER);
3490
3491  // ReduceSignalUsage allows the user to override these handlers
3492  // see the comments at the very top and in jvm_solaris.h.
3493  if (!ReduceSignalUsage) {
3494    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3495    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3496    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3497    DO_SIGNAL_CHECK(BREAK_SIGNAL);
3498  }
3499
3500  DO_SIGNAL_CHECK(SR_signum);
3501  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3502}
3503
3504typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3505
3506static os_sigaction_t os_sigaction = NULL;
3507
3508void os::Aix::check_signal_handler(int sig) {
3509  char buf[O_BUFLEN];
3510  address jvmHandler = NULL;
3511
3512  struct sigaction act;
3513  if (os_sigaction == NULL) {
3514    // only trust the default sigaction, in case it has been interposed
3515    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3516    if (os_sigaction == NULL) return;
3517  }
3518
3519  os_sigaction(sig, (struct sigaction*)NULL, &act);
3520
3521  address thisHandler = (act.sa_flags & SA_SIGINFO)
3522    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3523    : CAST_FROM_FN_PTR(address, act.sa_handler);
3524
3525
3526  switch(sig) {
3527  case SIGSEGV:
3528  case SIGBUS:
3529  case SIGFPE:
3530  case SIGPIPE:
3531  case SIGILL:
3532  case SIGXFSZ:
3533    // Renamed 'signalHandler' to avoid collision with other shared libs.
3534    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3535    break;
3536
3537  case SHUTDOWN1_SIGNAL:
3538  case SHUTDOWN2_SIGNAL:
3539  case SHUTDOWN3_SIGNAL:
3540  case BREAK_SIGNAL:
3541    jvmHandler = (address)user_handler();
3542    break;
3543
3544  case INTERRUPT_SIGNAL:
3545    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3546    break;
3547
3548  default:
3549    if (sig == SR_signum) {
3550      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3551    } else {
3552      return;
3553    }
3554    break;
3555  }
3556
3557  if (thisHandler != jvmHandler) {
3558    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3559    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3560    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3561    // No need to check this sig any longer
3562    sigaddset(&check_signal_done, sig);
3563    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN.
3564    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3565      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3566                    exception_name(sig, buf, O_BUFLEN));
3567    }
3568  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3569    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3570    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3571    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3572    // No need to check this sig any longer
3573    sigaddset(&check_signal_done, sig);
3574  }
3575
3576  // Dump all the signal handlers once this signal is flagged as checked.
3577  if (sigismember(&check_signal_done, sig)) {
3578    print_signal_handlers(tty, buf, O_BUFLEN);
3579  }
3580}
3581
3582extern bool signal_name(int signo, char* buf, size_t len);
3583
3584const char* os::exception_name(int exception_code, char* buf, size_t size) {
3585  if (0 < exception_code && exception_code <= SIGRTMAX) {
3586    // signal
3587    if (!signal_name(exception_code, buf, size)) {
3588      jio_snprintf(buf, size, "SIG%d", exception_code);
3589    }
3590    return buf;
3591  } else {
3592    return NULL;
3593  }
3594}
3595
3596// To install functions for atexit() processing
3597extern "C" {
3598  static void perfMemory_exit_helper() {
3599    perfMemory_exit();
3600  }
3601}
3602
3603// This is called _before_ most of the global arguments have been parsed.
3604void os::init(void) {
3605  // This is basic; we want to know if that ever changes.
3606  // (the shared memory boundary is supposed to be 256M aligned)
3607  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3608
3609  // First off, we need to know whether we run on AIX or PASE, and
3610  // the OS level we run on.
3611  os::Aix::initialize_os_info();
3612
3613  // Scan environment (SPEC1170 behaviour, etc)
3614  os::Aix::scan_environment();
3615
3616  // Check which pages are supported by AIX.
3617  os::Aix::query_multipage_support();
3618
3619  // Next, we need to initialize libo4 and libperfstat libraries.
3620  if (os::Aix::on_pase()) {
3621    os::Aix::initialize_libo4();
3622  } else {
3623    os::Aix::initialize_libperfstat();
3624  }
3625
3626  // Reset the perfstat information provided by ODM.
3627  if (os::Aix::on_aix()) {
3628    libperfstat::perfstat_reset();
3629  }
3630
3631  // Now initialize basic system properties. Note that for some of the values we
3632  // need libperfstat etc.
3633  os::Aix::initialize_system_info();
3634
3635  // Initialize large page support.
3636  if (UseLargePages) {
3637    os::large_page_init();
3638    if (!UseLargePages) {
3639      // initialize os::_page_sizes
3640      _page_sizes[0] = Aix::page_size();
3641      _page_sizes[1] = 0;
3642      if (Verbose) {
3643        fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
3644      }
3645    }
3646  } else {
3647    // initialize os::_page_sizes
3648    _page_sizes[0] = Aix::page_size();
3649    _page_sizes[1] = 0;
3650  }
3651
3652  // debug trace
3653  if (Verbose) {
3654    fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
3655    fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
3656    fprintf(stderr, "os::_page_sizes = ( ");
3657    for (int i = 0; _page_sizes[i]; i ++) {
3658      fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
3659    }
3660    fprintf(stderr, ")\n");
3661  }
3662
3663  _initial_pid = getpid();
3664
3665  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3666
3667  init_random(1234567);
3668
3669  ThreadCritical::initialize();
3670
3671  // Main_thread points to the aboriginal thread.
3672  Aix::_main_thread = pthread_self();
3673
3674  initial_time_count = os::elapsed_counter();
3675  pthread_mutex_init(&dl_mutex, NULL);
3676}
3677
3678// this is called _after_ the global arguments have been parsed
3679jint os::init_2(void) {
3680
3681  if (Verbose) {
3682    fprintf(stderr, "processor count: %d\n", os::_processor_count);
3683    fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
3684  }
3685
3686  // initially build up the loaded dll map
3687  LoadedLibraries::reload();
3688
3689  const int page_size = Aix::page_size();
3690  const int map_size = page_size;
3691
3692  address map_address = (address) MAP_FAILED;
3693  const int prot  = PROT_READ;
3694  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3695
3696  // use optimized addresses for the polling page,
3697  // e.g. map it to a special 32-bit address.
3698  if (OptimizePollingPageLocation) {
3699    // architecture-specific list of address wishes:
3700    address address_wishes[] = {
3701      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3702      // PPC64: all address wishes are non-negative 32-bit values where
3703      // the lower 16 bits are all zero. We can load these addresses
3704      // with a single ppc_lis instruction.
3705      (address) 0x30000000, (address) 0x31000000,
3706      (address) 0x32000000, (address) 0x33000000,
3707      (address) 0x40000000, (address) 0x41000000,
3708      (address) 0x42000000, (address) 0x43000000,
3709      (address) 0x50000000, (address) 0x51000000,
3710      (address) 0x52000000, (address) 0x53000000,
3711      (address) 0x60000000, (address) 0x61000000,
3712      (address) 0x62000000, (address) 0x63000000
3713    };
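    // Worked example for the comment above: 0x31000000 has all lower 16 bits
    // zero, so it can be materialized with the single instruction
    // "lis rX, 0x3100" (load-immediate-shifted: rX = 0x3100 << 16 = 0x31000000).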
3714    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3715
3716    // iterate over the list of address wishes:
3717    for (int i=0; i<address_wishes_length; i++) {
3718      // try to map with current address wish.
3719      // AIX needs MAP_FIXED if we provide an address, and mmap will
3720      // fail if the address is already mapped.
3721      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3722                                     map_size, prot,
3723                                     flags | MAP_FIXED,
3724                                     -1, 0);
3725      if (Verbose) {
3726        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3727                address_wishes[i], map_address + (ssize_t)page_size);
3728      }
3729
3730      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3731        // map succeeded and map_address is at wished address, exit loop.
3732        break;
3733      }
3734
3735      if (map_address != (address) MAP_FAILED) {
3736        // map succeeded, but polling_page is not at wished address, unmap and continue.
3737        ::munmap(map_address, map_size);
3738        map_address = (address) MAP_FAILED;
3739      }
3740      // map failed, continue loop.
3741    }
3742  } // end OptimizePollingPageLocation
3743
3744  if (map_address == (address) MAP_FAILED) {
3745    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3746  }
3747  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3748  os::set_polling_page(map_address);
3749
3750  if (!UseMembar) {
3751    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3752    guarantee(mem_serialize_page != (address) MAP_FAILED, "mmap Failed for memory serialize page");
3753    os::set_memory_serialize_page(mem_serialize_page);
3754
3755#ifndef PRODUCT
3756    if (Verbose && PrintMiscellaneous)
3757      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3758#endif
3759  }
3760
3761  // initialize suspend/resume support - must do this before signal_sets_init()
3762  if (SR_initialize() != 0) {
3763    perror("SR_initialize failed");
3764    return JNI_ERR;
3765  }
3766
3767  Aix::signal_sets_init();
3768  Aix::install_signal_handlers();
3769
3770  // Check minimum allowable stack size for thread creation and to initialize
3771  // the java system classes, including StackOverflowError - depends on page
3772  // size. Add a page for compiler2 recursion in main thread.
3773  // Add in 2*BytesPerWord times page size to account for VM stack during
3774  // class initialization depending on 32 or 64 bit VM.
3775  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3776            (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
3777                     2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
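  // Illustrative arithmetic (the page counts are example values, not the actual
  // defaults): with StackYellowPages + StackRedPages + StackShadowPages == 20,
  // a 64-bit VM (BytesPerWord == 8), compiler2 present and 4K pages, the lower
  // bound computed above is (20 + 16 + 1) * 4K == 148K.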
3778
3779  size_t threadStackSizeInBytes = ThreadStackSize * K;
3780  if (threadStackSizeInBytes != 0 &&
3781      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3782        tty->print_cr("\nThe stack size specified is too small. "
3783                      "Specify at least %dk",
3784                      os::Aix::min_stack_allowed / K);
3785        return JNI_ERR;
3786  }
3787
3788  // Make the stack size a multiple of the page size so that
3789  // the yellow/red zones can be guarded.
3790  // note that this can be 0, if no default stacksize was set
3791  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3792
3793  Aix::libpthread_init();
3794
3795  if (MaxFDLimit) {
3796    // set the number of file descriptors to max. print out error
3797    // if getrlimit/setrlimit fails but continue regardless.
3798    struct rlimit nbr_files;
3799    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3800    if (status != 0) {
3801      if (PrintMiscellaneous && (Verbose || WizardMode))
3802        perror("os::init_2 getrlimit failed");
3803    } else {
3804      nbr_files.rlim_cur = nbr_files.rlim_max;
3805      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3806      if (status != 0) {
3807        if (PrintMiscellaneous && (Verbose || WizardMode))
3808          perror("os::init_2 setrlimit failed");
3809      }
3810    }
3811  }
3812
3813  if (PerfAllowAtExitRegistration) {
3814    // only register atexit functions if PerfAllowAtExitRegistration is set.
3815    // atexit functions can be delayed until process exit time, which
3816    // can be problematic for embedded VM situations. Embedded VMs should
3817    // call DestroyJavaVM() to assure that VM resources are released.
3818
3819    // note: perfMemory_exit_helper atexit function may be removed in
3820    // the future if the appropriate cleanup code can be added to the
3821    // VM_Exit VMOperation's doit method.
3822    if (atexit(perfMemory_exit_helper) != 0) {
3823      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3824    }
3825  }
3826
3827  return JNI_OK;
3828}
3829
3830// Mark the polling page as unreadable
3831void os::make_polling_page_unreadable(void) {
3832  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3833    fatal("Could not disable polling page");
3834  }
3835}
3836
3837// Mark the polling page as readable
3838void os::make_polling_page_readable(void) {
3839  // Changed according to os_linux.cpp.
3840  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3841    fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3842  }
3843}
3844
3845int os::active_processor_count() {
3846  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3847  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3848  return online_cpus;
3849}
3850
3851void os::set_native_thread_name(const char *name) {
3852  // Not yet implemented.
3853  return;
3854}
3855
3856bool os::distribute_processes(uint length, uint* distribution) {
3857  // Not yet implemented.
3858  return false;
3859}
3860
3861bool os::bind_to_processor(uint processor_id) {
3862  // Not yet implemented.
3863  return false;
3864}
3865
3866void os::SuspendedThreadTask::internal_do_task() {
3867  if (do_suspend(_thread->osthread())) {
3868    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3869    do_task(context);
3870    do_resume(_thread->osthread());
3871  }
3872}
3873
3874class PcFetcher : public os::SuspendedThreadTask {
3875public:
3876  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3877  ExtendedPC result();
3878protected:
3879  void do_task(const os::SuspendedThreadTaskContext& context);
3880private:
3881  ExtendedPC _epc;
3882};
3883
3884ExtendedPC PcFetcher::result() {
3885  guarantee(is_done(), "task is not done yet.");
3886  return _epc;
3887}
3888
3889void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3890  Thread* thread = context.thread();
3891  OSThread* osthread = thread->osthread();
3892  if (osthread->ucontext() != NULL) {
3893    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3894  } else {
3895    // NULL context is unexpected, double-check this is the VMThread.
3896    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3897  }
3898}
3899
3900// Suspends the target using the signal mechanism and then grabs the PC before
3901// resuming the target. Used by the flat-profiler only.
3902ExtendedPC os::get_thread_pc(Thread* thread) {
3903  // Make sure that it is called by the watcher for the VMThread.
3904  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3905  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3906
3907  PcFetcher fetcher(thread);
3908  fetcher.run();
3909  return fetcher.result();
3910}
3911
3912// Not needed on Aix.
3913// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
3914// }
3915
3916////////////////////////////////////////////////////////////////////////////////
3917// debug support
3918
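// Worked example for same_page() below, assuming a 4K page size (so page_bits
// masks off the low 12 bits): same_page(0x2400, 0x1800) returns 0x2000, the
// first address past y's page; same_page(0x0800, 0x1800) returns 0x1000, the
// start of y's page; and same_page(0x1400, 0x1800) returns x unchanged,
// because both addresses lie on the same page.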
3919static address same_page(address x, address y) {
3920  intptr_t page_bits = -os::vm_page_size();
3921  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3922    return x;
3923  else if (x > y)
3924    return (address)(intptr_t(y) | ~page_bits) + 1;
3925  else
3926    return (address)(intptr_t(y) & page_bits);
3927}
3928
3929bool os::find(address addr, outputStream* st) {
3930
3931  st->print(PTR_FORMAT ": ", addr);
3932
3933  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3934  if (lib) {
3935    lib->print(st);
3936    return true;
3937  } else {
3938    lib = LoadedLibraries::find_for_data_address(addr);
3939    if (lib) {
3940      lib->print(st);
3941      return true;
3942    } else {
3943      st->print_cr("(outside any module)");
3944    }
3945  }
3946
3947  return false;
3948}
3949
3950////////////////////////////////////////////////////////////////////////////////
3951// misc
3952
3953// This does not do anything on Aix. This is basically a hook for being
3954// able to use structured exception handling (thread-local exception filters)
3955// on, e.g., Win32.
3956void
3957os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3958                         JavaCallArguments* args, Thread* thread) {
3959  f(value, method, args, thread);
3960}
3961
3962void os::print_statistics() {
3963}
3964
3965int os::message_box(const char* title, const char* message) {
3966  int i;
3967  fdStream err(defaultStream::error_fd());
3968  for (i = 0; i < 78; i++) err.print_raw("=");
3969  err.cr();
3970  err.print_raw_cr(title);
3971  for (i = 0; i < 78; i++) err.print_raw("-");
3972  err.cr();
3973  err.print_raw_cr(message);
3974  for (i = 0; i < 78; i++) err.print_raw("=");
3975  err.cr();
3976
3977  char buf[16];
3978  // Prevent process from exiting upon "read error" without consuming all CPU
3979  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3980
3981  return buf[0] == 'y' || buf[0] == 'Y';
3982}
3983
3984int os::stat(const char *path, struct stat *sbuf) {
3985  char pathbuf[MAX_PATH];
3986  if (strlen(path) > MAX_PATH - 1) {
3987    errno = ENAMETOOLONG;
3988    return -1;
3989  }
3990  os::native_path(strcpy(pathbuf, path));
3991  return ::stat(pathbuf, sbuf);
3992}
3993
3994bool os::check_heap(bool force) {
3995  return true;
3996}
3997
3998// Is a (classpath) directory empty?
3999bool os::dir_is_empty(const char* path) {
4000  DIR *dir = NULL;
4001  struct dirent *ptr;
4002
4003  dir = opendir(path);
4004  if (dir == NULL) return true;
4005
4006  /* Scan the directory */
4007  bool result = true;
4008  char buf[sizeof(struct dirent) + MAX_PATH];
4009  while (result && (ptr = ::readdir(dir)) != NULL) {
4010    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4011      result = false;
4012    }
4013  }
4014  closedir(dir);
4015  return result;
4016}
4017
4018// This code originates from JDK's sysOpen and open64_w
4019// from src/solaris/hpi/src/system_md.c
4020
4021int os::open(const char *path, int oflag, int mode) {
4022
4023  if (strlen(path) > MAX_PATH - 1) {
4024    errno = ENAMETOOLONG;
4025    return -1;
4026  }
4027  int fd;
4028
4029  fd = ::open64(path, oflag, mode);
4030  if (fd == -1) return -1;
4031
4032  // If the open succeeded, the file might still be a directory.
4033  {
4034    struct stat64 buf64;
4035    int ret = ::fstat64(fd, &buf64);
4036    int st_mode = buf64.st_mode;
4037
4038    if (ret != -1) {
4039      if ((st_mode & S_IFMT) == S_IFDIR) {
4040        errno = EISDIR;
4041        ::close(fd);
4042        return -1;
4043      }
4044    } else {
4045      ::close(fd);
4046      return -1;
4047    }
4048  }
4049
4050  // All file descriptors that are opened in the JVM and not
4051  // specifically destined for a subprocess should have the
4052  // close-on-exec flag set. If we don't set it, then careless 3rd
4053  // party native code might fork and exec without closing all
4054  // appropriate file descriptors (e.g. as we do in closeDescriptors in
4055  // UNIXProcess.c), and this in turn might:
4056  //
4057  // - cause end-of-file to fail to be detected on some file
4058  //   descriptors, resulting in mysterious hangs, or
4059  //
4060  // - might cause an fopen in the subprocess to fail on a system
4061  //   suffering from bug 1085341.
4062  //
4063  // (Yes, the default setting of the close-on-exec flag is a Unix
4064  // design flaw.)
4065  //
4066  // See:
4067  // 1085341: 32-bit stdio routines should support file descriptors >255
4068  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4069  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4070#ifdef FD_CLOEXEC
4071  {
4072    int flags = ::fcntl(fd, F_GETFD);
4073    if (flags != -1)
4074      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4075  }
4076#endif
4077
4078  return fd;
4079}
4080
4081
4082// create binary file, rewriting existing file if required
4083int os::create_binary_file(const char* path, bool rewrite_existing) {
4084  int oflags = O_WRONLY | O_CREAT;
4085  if (!rewrite_existing) {
4086    oflags |= O_EXCL;
4087  }
4088  return ::open64(path, oflags, S_IREAD | S_IWRITE);
4089}
4090
4091// return current position of file pointer
4092jlong os::current_file_offset(int fd) {
4093  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4094}
4095
4096// move file pointer to the specified offset
4097jlong os::seek_to_file_offset(int fd, jlong offset) {
4098  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4099}
4100
4101// This code originates from JDK's sysAvailable
4102// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
4103
4104int os::available(int fd, jlong *bytes) {
4105  jlong cur, end;
4106  int mode;
4107  struct stat64 buf64;
4108
4109  if (::fstat64(fd, &buf64) >= 0) {
4110    mode = buf64.st_mode;
4111    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4112      // XXX: is the following call interruptible? If so, this might
4113      // need to go through the INTERRUPT_IO() wrapper as for other
4114      // blocking, interruptible calls in this file.
4115      int n;
4116      if (::ioctl(fd, FIONREAD, &n) >= 0) {
4117        *bytes = n;
4118        return 1;
4119      }
4120    }
4121  }
4122  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4123    return 0;
4124  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4125    return 0;
4126  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4127    return 0;
4128  }
4129  *bytes = end - cur;
4130  return 1;
4131}
4132
4133// Map a block of memory.
4134char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4135                        char *addr, size_t bytes, bool read_only,
4136                        bool allow_exec) {
4137  Unimplemented();
4138  return NULL;
4139}
4140
4141
4142// Remap a block of memory.
4143char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4144                          char *addr, size_t bytes, bool read_only,
4145                          bool allow_exec) {
4146  // same as map_memory() on this OS
4147  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4148                        allow_exec);
4149}
4150
4151// Unmap a block of memory.
4152bool os::pd_unmap_memory(char* addr, size_t bytes) {
4153  return munmap(addr, bytes) == 0;
4154}
4155
4156// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4157// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4158// of a thread.
4159//
4160// current_thread_cpu_time() and thread_cpu_time(Thread*) return
4161// the fast estimate available on the platform.
4162
4163jlong os::current_thread_cpu_time() {
4164  // return user + sys since the cost is the same
4165  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4166  assert(n >= 0, "negative CPU time");
4167  return n;
4168}
4169
4170jlong os::thread_cpu_time(Thread* thread) {
4171  // consistent with what current_thread_cpu_time() returns
4172  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4173  assert(n >= 0, "negative CPU time");
4174  return n;
4175}
4176
4177jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4178  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4179  assert(n >= 0, "negative CPU time");
4180  return n;
4181}
4182
4183static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4184  bool error = false;
4185
4186  jlong sys_time = 0;
4187  jlong user_time = 0;
4188
4189  // Reimplemented using getthrds64().
4190  //
4191  // It goes like this:
4192  // For the thread in question, get the kernel thread id. Then get the
4193  // kernel thread statistics using that id.
4194  //
4195  // Of course this only works when no m:n pthread scheduling is used,
4196  // i.e. there is a 1:1 relationship to kernel threads.
4197  // On AIX, see the AIXTHREAD_SCOPE variable.
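  // (AIXTHREAD_SCOPE=S requests system contention scope, i.e. a 1:1 mapping of
  // pthreads to kernel threads; AIXTHREAD_SCOPE=P requests process scope, where
  // pthreads are multiplexed onto kernel threads and this mapping breaks down.)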
4198
4199  pthread_t pthtid = thread->osthread()->pthread_id();
4200
4201  // retrieve kernel thread id for the pthread:
4202  tid64_t tid = 0;
4203  struct __pthrdsinfo pinfo;
4204  // I just love those otherworldly IBM APIs which force me to hand down
4205  // dummy buffers for stuff I don't care for...
4206  char dummy[1];
4207  int dummy_size = sizeof(dummy);
4208  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4209                          dummy, &dummy_size) == 0) {
4210    tid = pinfo.__pi_tid;
4211  } else {
4212    tty->print_cr("pthread_getthrds_np failed.");
4213    error = true;
4214  }
4215
4216  // retrieve kernel timing info for that kernel thread
4217  if (!error) {
4218    struct thrdentry64 thrdentry;
4219    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4220      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4221      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4222    } else {
4223      tty->print_cr("getthrds64 failed.");
4224      error = true;
4225    }
4226  }
4227
4228  if (p_sys_time) {
4229    *p_sys_time = sys_time;
4230  }
4231
4232  if (p_user_time) {
4233    *p_user_time = user_time;
4234  }
4235
4236  if (error) {
4237    return false;
4238  }
4239
4240  return true;
4241}
4242
4243jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4244  jlong sys_time;
4245  jlong user_time;
4246
4247  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4248    return -1;
4249  }
4250
4251  return user_sys_cpu_time ? sys_time + user_time : user_time;
4252}
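
// For example, thread_cpu_time(t, true) returns user + sys time, while
// thread_cpu_time(t, false) returns user time only; both are reported in
// nanoseconds, per the tv_sec/tv_usec conversions above.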
4253
4254void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4255  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4256  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4257  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4258  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4259}
4260
4261void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4262  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4263  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4264  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4265  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4266}
4267
4268bool os::is_thread_cpu_time_supported() {
4269  return true;
4270}
4271
4272// System loadavg support. Returns -1 if load average cannot be obtained.
4273// For now just return the system wide load average (no processor sets).
4274int os::loadavg(double values[], int nelem) {
4275
4276  // Implemented using libperfstat on AIX.
4277
4278  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4279  guarantee(values, "argument error");
4280
4281  if (os::Aix::on_pase()) {
4282    Unimplemented();
4283    return -1;
4284  } else {
4285    // AIX: use libperfstat
4286    //
4287    // See also:
4288    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4289    // /usr/include/libperfstat.h:
4290
4291    // Use get_cpuinfo, which is already AIX-version independent.
4292    os::Aix::cpuinfo_t ci;
4293    if (os::Aix::get_cpuinfo(&ci)) {
4294      for (int i = 0; i < nelem; i++) {
4295        values[i] = ci.loadavg[i];
4296      }
4297    } else {
4298      return -1;
4299    }
4300    return nelem;
4301  }
4302}
4303
4304void os::pause() {
4305  char filename[MAX_PATH];
4306  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4307    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4308  } else {
4309    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4310  }
4311
4312  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4313  if (fd != -1) {
4314    struct stat buf;
4315    ::close(fd);
4316    while (::stat(filename, &buf) == 0) {
4317      (void)::poll(NULL, 0, 100);
4318    }
4319  } else {
4320    jio_fprintf(stderr,
4321      "Could not open pause file '%s', continuing immediately.\n", filename);
4322  }
4323}
4324
4325bool os::Aix::is_primordial_thread() {
4326  return pthread_self() == (pthread_t)1;
4331}
4332
4333// OS recognition (PASE/AIX, OS level). Call this before calling any
4334// one of the Aix::on_pase(), Aix::os_version() static functions.
4335void os::Aix::initialize_os_info() {
4336
4337  assert(_on_pase == -1 && _os_version == -1, "already called.");
4338
4339  struct utsname uts;
4340  memset(&uts, 0, sizeof(uts));
4341  strcpy(uts.sysname, "?");
4342  if (::uname(&uts) == -1) {
4343    fprintf(stderr, "uname failed (%d)\n", errno);
4344    guarantee(0, "Could not determine whether we run on AIX or PASE");
4345  } else {
4346    if (Verbose) {
4347      fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4348              "node \"%s\" machine \"%s\"\n",
4349              uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4350    }
4351    const int major = atoi(uts.version);
4352    assert(major > 0, "invalid OS version");
4353    const int minor = atoi(uts.release);
4354    assert(minor > 0, "invalid OS release");
4355    _os_version = (major << 8) | minor;
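    // For example, AIX 6.1 is encoded as (6 << 8) | 1 == 0x0601, which is why
    // the 0x0503 check below corresponds to AIX 5.3.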
4356    if (strcmp(uts.sysname, "OS400") == 0) {
4357      Unimplemented();
4358    } else if (strcmp(uts.sysname, "AIX") == 0) {
4359      // We run on AIX. We do not support versions older than AIX 5.3.
4360      _on_pase = 0;
4361      if (_os_version < 0x0503) {
4362        fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
4363        assert(false, "AIX release too old.");
4364      } else {
4365        if (Verbose) {
4366          fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
4367        }
4368      }
4369    } else {
4370      assert(false, "unknown OS");
4371    }
4372  }
4373
4374  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4375
4376} // end: os::Aix::initialize_os_info()
4377
4378// Scan the environment for important settings which might affect the VM.
4379// Trace out settings. Warn about invalid settings and/or correct them.
4380//
4381// Must run after os::Aix::initialize_os_info().
4382void os::Aix::scan_environment() {
4383
4384  char* p;
4385  int rc;
4386
4387  // Warn explicitly if EXTSHM=ON is used. That switch changes how
4388  // System V shared memory behaves. One effect is that the page size of
4389  // shared memory cannot be changed dynamically, effectively preventing
4390  // large pages from working.
4391  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4392  // recommendation is (in OSS notes) to switch it off.
4393  p = ::getenv("EXTSHM");
4394  if (Verbose) {
4395    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4396  }
4397  if (p && strcmp(p, "ON") == 0) {
4398    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4399    _extshm = 1;
4400  } else {
4401    _extshm = 0;
4402  }
4403
4404  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4405  // Not tested, not supported.
4406  //
4407  // Note that it might be worth the trouble to test and to require it, if only to
4408  // get useful return codes for mprotect.
4409  //
4410  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4411  // exec() ? before loading the libjvm ? ....)
4412  p = ::getenv("XPG_SUS_ENV");
4413  if (Verbose) {
4414    fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
4415  }
4416  if (p && strcmp(p, "ON") == 0) {
4417    _xpg_sus_mode = 1;
4418    fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
4419    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4420    // clobber address ranges. If we ever want to support that, we have to do some
4421    // testing first.
4422    guarantee(false, "XPG_SUS_ENV=ON not supported");
4423  } else {
4424    _xpg_sus_mode = 0;
4425  }
4426
4427  // Switch off AIX internal (pthread) guard pages. This has
4428  // immediate effect for any pthread_create calls which follow.
4429  p = ::getenv("AIXTHREAD_GUARDPAGES");
4430  if (Verbose) {
4431    fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
4432    fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
4433  }
4434  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4435  guarantee(rc == 0, "");
4436
4437} // end: os::Aix::scan_environment()
4438
4439// PASE: initialize the libo4 library (AS400 PASE porting library).
4440void os::Aix::initialize_libo4() {
4441  Unimplemented();
4442}
4443
4444// AIX: initialize the libperfstat library (we load this dynamically
4445// because it is only available on AIX.
4446void os::Aix::initialize_libperfstat() {
4447
4448  assert(os::Aix::on_aix(), "AIX only");
4449
4450  if (!libperfstat::init()) {
4451    fprintf(stderr, "libperfstat initialization failed.\n");
4452    assert(false, "libperfstat initialization failed");
4453  } else {
4454    if (Verbose) {
4455      fprintf(stderr, "libperfstat initialized.\n");
4456    }
4457  }
4458} // end: os::Aix::initialize_libperfstat
4459
4460/////////////////////////////////////////////////////////////////////////////
4461// thread stack
4462
4463// function to query the current stack size using pthread_getthrds_np
4464//
4465// ! do not change anything here unless you know what you are doing !
4466static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4467
4468  // This only works when invoked on a pthread. As we agreed not to use
4469  // primordial threads anyway, I assert this here.
4470  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4471
4472  // Information about this API can be found (a) in the pthread.h header and
4473  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4474  //
4475  // The use of this API to find out the current stack is kind of undefined.
4476  // But after a lot of tries and asking IBM about it, I concluded that it is safe
4477  // enough for cases where I let the pthread library create its stacks. For cases
4478  // where I create my own stack and pass it to pthread_create, it seems not to
4479  // work (the returned stack size in that case is 0).
4480
4481  pthread_t tid = pthread_self();
4482  struct __pthrdsinfo pinfo;
4483  char dummy[1]; // we only need this to satisfy the API and avoid an error return
4484  int dummy_size = sizeof(dummy);
4485
4486  memset(&pinfo, 0, sizeof(pinfo));
4487
4488  const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4489                                      sizeof(pinfo), dummy, &dummy_size);
4490
4491  if (rc != 0) {
4492    fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
4493    guarantee(0, "pthread_getthrds_np failed");
4494  }
4495
4496  guarantee(pinfo.__pi_stackend, "returned stack base invalid");
4497
4498  // The following can happen when invoking pthread_getthrds_np on a pthread running on a user-provided stack
4499  // (when handing down a stack to pthread_create, see pthread_attr_setstackaddr).
4500  // Not sure what to do here - I feel inclined to forbid this use case completely.
4501  guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
4502
4503  // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
4504  if (p_stack_base) {
4505    (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
4506  }
4507
4508  if (p_stack_size) {
4509    (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
4510  }
4511
4512#ifndef PRODUCT
4513  if (Verbose) {
4514    fprintf(stderr,
4515            "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
4516            ", real stack_size=" INTPTR_FORMAT
4517            ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
4518            (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
4519            (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
4520            pinfo.__pi_stacksize - os::Aix::stack_page_size());
4521  }
4522#endif
4523
4524} // end query_stack_dimensions
4525
4526// get the current stack base from the OS (actually, the pthread library)
4527address os::current_stack_base() {
4528  address p;
4529  query_stack_dimensions(&p, 0);
4530  return p;
4531}
4532
4533// get the current stack size from the OS (actually, the pthread library)
4534size_t os::current_stack_size() {
4535  size_t s;
4536  query_stack_dimensions(0, &s);
4537  return s;
4538}
4539
4540// Refer to the comments in os_solaris.cpp park-unpark.
4541//
4542// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4543// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4544// For specifics regarding the bug see GLIBC BUGID 261237 :
4545//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4546// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4547// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4548// is used. (The simple C test-case provided in the GLIBC bug report manifests the
4549// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4550// and monitorenter when we're using 1-0 locking. All those operations may result in
4551// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4552// of libpthread avoids the problem, but isn't practical.
4553//
4554// Possible remedies:
4555//
4556// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4557//      This is palliative and probabilistic, however. If the thread is preempted
4558//      between the call to compute_abstime() and pthread_cond_timedwait(), more
4559//      than the minimum period may have passed, and the abstime may be stale (in the
4560//      past) resulting in a hang. Using this technique reduces the odds of a hang
4561//      but the JVM is still vulnerable, particularly on heavily loaded systems.
4562//
4563// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4564//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4565//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4566//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4567//      thread.
4568//
4569// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4570//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4571//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4572//      This also works well. In fact it avoids kernel-level scalability impediments
4573//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4574//      timers in a graceful fashion.
4575//
4576// 4.   When the abstime value is in the past it appears that control returns
4577//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4578//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4579//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4580//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4581//      It may be possible to avoid reinitialization by checking the return
4582//      value from pthread_cond_timedwait(). In addition to reinitializing the
4583//      condvar we must establish the invariant that cond_signal() is only called
4584//      within critical sections protected by the adjunct mutex. This prevents
4585//      cond_signal() from "seeing" a condvar that's in the midst of being
4586//      reinitialized or that is corrupt. Sadly, this invariant obviates the
4587//      desirable signal-after-unlock optimization that avoids futile context switching.
4588//
4589//      I'm also concerned that some versions of NPTL might allocate an auxiliary
4590//      structure when a condvar is used or initialized. cond_destroy() would
4591//      release the helper structure. Our reinitialize-after-timedwait fix
4592//      would put excessive stress on malloc/free and the locks protecting the C-heap.
4593//
4594// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
4595// It may be possible to refine (4) by checking the kernel and NPTL versions
4596// and only enabling the work-around for vulnerable environments.
4597
4598// utility to compute the abstime argument to timedwait:
4599// millis is the relative timeout time
4600// abstime will be the absolute timeout time
4601// TODO: replace compute_abstime() with unpackTime()
4602
4603static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4604  if (millis < 0) millis = 0;
4605  struct timeval now;
4606  int status = gettimeofday(&now, NULL);
4607  assert(status == 0, "gettimeofday");
4608  jlong seconds = millis / 1000;
4609  millis %= 1000;
4610  if (seconds > 50000000) { // see man cond_timedwait(3T)
4611    seconds = 50000000;
4612  }
4613  abstime->tv_sec = now.tv_sec  + seconds;
4614  long       usec = now.tv_usec + millis * 1000;
4615  if (usec >= 1000000) {
4616    abstime->tv_sec += 1;
4617    usec -= 1000000;
4618  }
4619  abstime->tv_nsec = usec * 1000;
4620  return abstime;
4621}
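
// Worked example for compute_abstime(): with millis == 2500 and
// now == { tv_sec = 100, tv_usec = 600000 }, seconds == 2 with 500 ms left
// over; usec == 600000 + 500 * 1000 == 1100000 >= 1000000, so the result is
// { tv_sec = 103, tv_nsec = 100000000 }.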
4622
4623
4624// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4625// Conceptually TryPark() should be equivalent to park(0).
4626
4627int os::PlatformEvent::TryPark() {
4628  for (;;) {
4629    const int v = _Event;
4630    guarantee ((v == 0) || (v == 1), "invariant");
4631    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4632  }
4633}
4634
4635void os::PlatformEvent::park() {       // AKA "down()"
4636  // Invariant: Only the thread associated with the Event/PlatformEvent
4637  // may call park().
4638  // TODO: assert that _Assoc != NULL or _Assoc == Self
4639  int v;
4640  for (;;) {
4641    v = _Event;
4642    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4643  }
4644  guarantee (v >= 0, "invariant");
4645  if (v == 0) {
4646    // Do this the hard way by blocking ...
4647    int status = pthread_mutex_lock(_mutex);
4648    assert_status(status == 0, status, "mutex_lock");
4649    guarantee (_nParked == 0, "invariant");
4650    ++ _nParked;
4651    while (_Event < 0) {
4652      status = pthread_cond_wait(_cond, _mutex);
4653      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_wait");
4654    }
4655    -- _nParked;
4656
4657    // In theory we could move the ST of 0 into _Event past the unlock(),
4658    // but then we'd need a MEMBAR after the ST.
4659    _Event = 0;
4660    status = pthread_mutex_unlock(_mutex);
4661    assert_status(status == 0, status, "mutex_unlock");
4662  }
4663  guarantee (_Event >= 0, "invariant");
4664}
4665
4666int os::PlatformEvent::park(jlong millis) {
4667  guarantee (_nParked == 0, "invariant");
4668
4669  int v;
4670  for (;;) {
4671    v = _Event;
4672    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4673  }
4674  guarantee (v >= 0, "invariant");
4675  if (v != 0) return OS_OK;
4676
4677  // We do this the hard way, by blocking the thread.
4678  // Consider enforcing a minimum timeout value.
4679  struct timespec abst;
4680  compute_abstime(&abst, millis);
4681
4682  int ret = OS_TIMEOUT;
4683  int status = pthread_mutex_lock(_mutex);
4684  assert_status(status == 0, status, "mutex_lock");
4685  guarantee (_nParked == 0, "invariant");
4686  ++_nParked;
4687
4688  // Object.wait(timo) will return because of
4689  // (a) notification
4690  // (b) timeout
4691  // (c) thread.interrupt
4692  //
4693  // Thread.interrupt and object.notify{All} both call Event::set.
4694  // That is, we treat thread.interrupt as a special case of notification.
4695  // We ignore spurious OS wakeups unless FilterSpuriousWakeups is false.
4696  // We assume all ETIME returns are valid.
4697  //
4698  // TODO: properly differentiate simultaneous notify+interrupt.
4699  // In that case, we should propagate the notify to another waiter.
4700
4701  while (_Event < 0) {
4702    status = pthread_cond_timedwait(_cond, _mutex, &abst);
4703    assert_status(status == 0 || status == ETIMEDOUT,
4704          status, "cond_timedwait");
4705    if (!FilterSpuriousWakeups) break;         // previous semantics
4706    if (status == ETIMEDOUT) break;
4707    // We consume and ignore EINTR and spurious wakeups.
4708  }
4709  --_nParked;
4710  if (_Event >= 0) {
4711     ret = OS_OK;
4712  }
4713  _Event = 0;
4714  status = pthread_mutex_unlock(_mutex);
4715  assert_status(status == 0, status, "mutex_unlock");
4716  assert (_nParked == 0, "invariant");
4717  return ret;
4718}

void os::PlatformEvent::unpark() {
  int v, AnyWaiters;
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  if (v < 0) {
    // Wait for the thread associated with the event to vacate.
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // Note that unlike the Linux and Solaris implementations, we
      // intentionally signal *before* dropping the lock: the mutex should
      // be held for pthread_cond_signal(_cond) on this platform.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // In rare circumstances the signal above can cause a thread to return
  // prematurely from cond_{timed}wait(), but such a spurious wakeup is
  // benign: the victim will simply re-test its condition and re-park itself.
}


// JSR166
// -------------------------------------------------------

//
// The solaris and linux implementations of park/unpark are fairly
// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
// Park decrements the count if > 0, else does a condvar wait. Unpark
// sets the count to 1 and signals the condvar. Only one thread ever waits
// on the condvar. Contention seen when trying to park implies that someone
// is unparking you, so don't wait. And spurious returns are fine, so there
// is no need to track notifications. The protocol is sketched below.
//
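// Protocol sketch (illustrative pseudocode only, not the code below;
// _counter and the mutex/condvar pair are the Parker members used there):
//
//   park():
//     if (_counter > 0) { _counter = 0; return; }   // consume the permit
//     lock(_mutex);
//     while (_counter == 0) wait(_cond, _mutex);    // spurious returns OK
//     _counter = 0;
//     unlock(_mutex);
//
//   unpark():
//     lock(_mutex);
//     _counter = 1;                                 // at most one permit
//     signal(_cond);
//     unlock(_mutex);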

#define MAX_SECS 100000000
//
// This code is common to linux and solaris and will be moved to a
// common place in dolphin.
//
// The passed in time value is either a relative time in nanoseconds
// or an absolute time in milliseconds. Either way it has to be unpacked
// into suitable seconds and nanoseconds components and stored in the
// given timespec structure.
// Given that the time is a 64-bit value and the time_t used in the timespec
// is only a signed 32-bit value (except on 64-bit Linux), we have to watch
// for overflow if times way in the future are given. Further, on Solaris
// versions prior to 10 there is a restriction (see cond_timedwait) that the
// specified number of seconds, in abstime, is less than current_time +
// 100,000,000. As it will be 28 years before "now + 100000000" will
// overflow, we can ignore overflow and just impose a hard-limit on seconds
// using the value of "now + 100,000,000". This places a limit on the
// timeout of about 3.17 years from "now".
//
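// Worked example (illustrative numbers only): for a relative request of
// time = 2,500,000,000 ns with now = {tv_sec = 1000, tv_usec = 700000},
// unpackTime() computes
//   secs    = 2500000000 / NANOSECS_PER_SEC = 2
//   tv_sec  = 1000 + 2 = 1002
//   tv_nsec = (2500000000 % NANOSECS_PER_SEC) + 700000 * 1000
//           = 500,000,000 + 700,000,000 = 1,200,000,000
// which overflows one second, so the carry step yields
//   tv_sec  = 1003, tv_nsec = 200,000,000.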

static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "unpackTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}

void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
    _counter = 0;
    OrderAccess::fence();
    return;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's
  // an interrupt pending. Check interrupt before trying to wait.
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments.
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }

  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if we cannot get the lock, since the interference arises
  // from unparking. Also, check for a pending interrupt before waiting.
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex);
  } else {
    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy (_cond);
      pthread_cond_init    (_cond, NULL);
    }
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // If externally suspended while waiting, re-suspend.
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}

void Parker::unpark() {
  int s, status;
  status = pthread_mutex_lock(_mutex);
  assert (status == 0, "invariant");
  s = _counter;
  _counter = 1;
  if (s < 1) {
    if (WorkAroundNPTLTimedWaitHang) {
      status = pthread_cond_signal (_cond);
      assert (status == 0, "invariant");
      status = pthread_mutex_unlock(_mutex);
      assert (status == 0, "invariant");
    } else {
      status = pthread_mutex_unlock(_mutex);
      assert (status == 0, "invariant");
      status = pthread_cond_signal (_cond);
      assert (status == 0, "invariant");
    }
  } else {
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
  }
}
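
// These primitives back java.util.concurrent.locks.LockSupport (via
// sun.misc.Unsafe.park/unpark). Roughly:
//   LockSupport.park()        -> Parker::park(false, 0)
//   LockSupport.parkNanos(n)  -> Parker::park(false, n)   // n relative, ns
//   LockSupport.parkUntil(d)  -> Parker::park(true, d)    // d absolute, ms
//   LockSupport.unpark(t)     -> Parker::unpark()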


extern char** environ;

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't fork a new process).
// Unlike system(), this function can be called from a signal handler. It
// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  char* argv[4] = { (char*) "sh", (char*) "-c", cmd, NULL };

  pid_t pid = fork();

  if (pid < 0) {
    // fork failed
    return -1;

  } else if (pid == 0) {
    // child process

    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
    execve("/usr/bin/sh", argv, environ);

    // execve failed
    _exit(-1);

  } else {
    // Copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
    // care about the actual exit code, for now.

    int status;

    // Wait for the child process to exit. This returns immediately if
    // the child has already exited.
    while (waitpid(pid, &status, 0) < 0) {
      switch (errno) {
      case ECHILD: return 0;
      case EINTR: break;
      default: return -1;
      }
    }

    if (WIFEXITED(status)) {
      // The child exited normally; get its exit code.
      return WEXITSTATUS(status);
    } else if (WIFSIGNALED(status)) {
      // The child exited because of a signal.
      // The best value to return is 0x80 + signal number,
      // because that is what all Unix shells do, and because
      // it allows callers to distinguish between process exit and
      // process death by signal.
      return 0x80 + WTERMSIG(status);
    } else {
      // Unknown exit code; pass it through.
      return status;
    }
  }
  // Avoid a compiler warning about a missing return value.
  return -1;
}
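
// Usage sketch (illustrative only; the command string is hypothetical):
//
//   int rc = os::fork_and_exec((char*) "ls /tmp");
//   if (rc == -1) {
//     // fork() failed, or waitpid() failed with an unexpected errno
//   } else if (rc >= 0x80) {
//     // the shell or command died of signal (rc - 0x80)
//   } else {
//     // normal termination; rc is the exit code
//   }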

// is_headless_jre()
//
// Test for the existence of xawt/libmawt.so or libawt_xawt.so
// in order to report whether we are running in a headless jre.
//
// Since JDK8, xawt/libmawt.so has been moved into the same directory
// as libawt.so and renamed libawt_xawt.so.
bool os::is_headless_jre() {
  struct stat statbuf;
  char buf[MAXPATHLEN];
  char libmawtpath[MAXPATHLEN];
  const char *xawtstr  = "/xawt/libmawt.so";
  const char *new_xawtstr = "/libawt_xawt.so";

  char *p;

  // Get path to libjvm.so
  os::jvm_path(buf, sizeof(buf));

  // Get rid of libjvm.so
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Get rid of client or server
  p = strrchr(buf, '/');
  if (p == NULL) return false;
  else *p = '\0';

  // Check xawt/libmawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  // Check libawt_xawt.so
  strcpy(libmawtpath, buf);
  strcat(libmawtpath, new_xawtstr);
  if (::stat(libmawtpath, &statbuf) == 0) return false;

  return true;
}
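
// Example (illustrative paths only): for a JVM located at
//   <jre>/lib/ppc64/server/libjvm.so
// the two stat() probes above are
//   <jre>/lib/ppc64/xawt/libmawt.so   (pre-JDK8 layout)
//   <jre>/lib/ppc64/libawt_xawt.so    (JDK8+ layout)
// and the JRE is reported headless only if neither library exists.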

// Get the default path to the core file.
// Returns the length of the string written to the buffer, or 0 on failure.
int os::get_core_path(char* buffer, size_t bufferSize) {
  const char* p = get_current_directory(buffer, bufferSize);

  if (p == NULL) {
    assert(p != NULL, "failed to get current directory");
    return 0;
  }

  return strlen(buffer);
}

#ifndef PRODUCT
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif
