os_aix.cpp revision 5969:666e6ce3976c
1/*
2 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
3 * Copyright 2012, 2013 SAP AG. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26// According to the AIX OS doc #pragma alloca must be used
27// with C++ compiler before referencing the function alloca()
28#pragma alloca
29
30// no precompiled headers
31#include "classfile/classLoader.hpp"
32#include "classfile/systemDictionary.hpp"
33#include "classfile/vmSymbols.hpp"
34#include "code/icBuffer.hpp"
35#include "code/vtableStubs.hpp"
36#include "compiler/compileBroker.hpp"
37#include "interpreter/interpreter.hpp"
38#include "jvm_aix.h"
39#include "libperfstat_aix.hpp"
40#include "loadlib_aix.hpp"
41#include "memory/allocation.inline.hpp"
42#include "memory/filemap.hpp"
43#include "mutex_aix.inline.hpp"
44#include "oops/oop.inline.hpp"
45#include "os_share_aix.hpp"
46#include "porting_aix.hpp"
47#include "prims/jniFastGetField.hpp"
48#include "prims/jvm.h"
49#include "prims/jvm_misc.hpp"
50#include "runtime/arguments.hpp"
51#include "runtime/extendedPC.hpp"
52#include "runtime/globals.hpp"
53#include "runtime/interfaceSupport.hpp"
54#include "runtime/java.hpp"
55#include "runtime/javaCalls.hpp"
56#include "runtime/mutexLocker.hpp"
57#include "runtime/objectMonitor.hpp"
58#include "runtime/osThread.hpp"
59#include "runtime/perfMemory.hpp"
60#include "runtime/sharedRuntime.hpp"
61#include "runtime/statSampler.hpp"
62#include "runtime/stubRoutines.hpp"
63#include "runtime/threadCritical.hpp"
64#include "runtime/timer.hpp"
65#include "services/attachListener.hpp"
66#include "services/runtimeService.hpp"
67#include "thread_aix.inline.hpp"
68#include "utilities/decoder.hpp"
69#include "utilities/defaultStream.hpp"
70#include "utilities/events.hpp"
71#include "utilities/growableArray.hpp"
72#include "utilities/vmError.hpp"
73#ifdef TARGET_ARCH_ppc
74# include "assembler_ppc.inline.hpp"
75# include "nativeInst_ppc.hpp"
76#endif
77#ifdef COMPILER1
78#include "c1/c1_Runtime1.hpp"
79#endif
80#ifdef COMPILER2
81#include "opto/runtime.hpp"
82#endif
83
84// put OS-includes here (sorted alphabetically)
85#include <errno.h>
86#include <fcntl.h>
87#include <inttypes.h>
88#include <poll.h>
89#include <procinfo.h>
90#include <pthread.h>
91#include <pwd.h>
92#include <semaphore.h>
93#include <signal.h>
94#include <stdint.h>
95#include <stdio.h>
96#include <string.h>
97#include <unistd.h>
98#include <sys/ioctl.h>
99#include <sys/ipc.h>
100#include <sys/mman.h>
101#include <sys/resource.h>
102#include <sys/select.h>
103#include <sys/shm.h>
104#include <sys/socket.h>
105#include <sys/stat.h>
106#include <sys/sysinfo.h>
107#include <sys/systemcfg.h>
108#include <sys/time.h>
109#include <sys/times.h>
110#include <sys/types.h>
111#include <sys/utsname.h>
112#include <sys/vminfo.h>
113#include <sys/wait.h>
114
115// Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
116#if !defined(_AIXVERSION_610)
117extern "C" {
118  int getthrds64(pid_t ProcessIdentifier,
119                 struct thrdentry64* ThreadBuffer,
120                 int ThreadSize,
121                 tid64_t* IndexPointer,
122                 int Count);
123}
124#endif
125
126// Excerpts from systemcfg.h definitions newer than AIX 5.3
127#ifndef PV_7
128# define PV_7 0x200000          // Power PC 7
129# define PV_7_Compat 0x208000   // Power PC 7
130#endif
131
#define MAX_PATH (2 * K)

// For timer info max values which include all bits.
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// Error codes for multipage initialization error analysis
// (stored in 'g_multipage_error' by query_multipage_support()).
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are thus that codeptr_t is a *real code ptr*.
// This means that any function taking codeptr_t as arguments will assume
// a real codeptr and won't handle function descriptors (eg getFuncName),
// whereas functions taking address as args will deal with function
// descriptors (eg os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;

// Typedefs for stackslots, stack pointers, pointers to op codes.
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// Query dimensions of the stack of the calling thread (defined later in this file).
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
155
156// function to check a given stack pointer against given stack limits
157inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
158  if (((uintptr_t)sp) & 0x7) {
159    return false;
160  }
161  if (sp > stack_base) {
162    return false;
163  }
164  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
165    return false;
166  }
167  return true;
168}
169
170// returns true if function is a valid codepointer
171inline bool is_valid_codepointer(codeptr_t p) {
172  if (!p) {
173    return false;
174  }
175  if (((uintptr_t)p) & 0x3) {
176    return false;
177  }
178  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
179    return false;
180  }
181  return true;
182}
183
// Macro to check a given stack pointer against given stack limits; aborts
// the VM via guarantee() if the test fails.
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// Macro to check the current stack pointer against given stack limits;
// aborts the VM via guarantee() if the current SP is out of range.
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
195
196////////////////////////////////////////////////////////////////////////////////
197// global variables (for a description see os_aix.hpp)
198
// Note: most of the int fields below use -1 to mean "not yet initialized";
// they are filled in during VM startup (see e.g. query_multipage_support()).
julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
size_t    os::Aix::_shm_default_page_size = -1;
int       os::Aix::_can_use_64K_pages = -1;
int       os::Aix::_can_use_16M_pages = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls
224
225julong os::available_memory() {
226  return Aix::available_memory();
227}
228
// Not yet implemented for AIX; Unimplemented() reports a VM error, so the
// return value is never actually reached.
julong os::Aix::available_memory() {
  Unimplemented();
  return 0;
}
233
234julong os::physical_memory() {
235  return Aix::physical_memory();
236}
237
238////////////////////////////////////////////////////////////////////////////////
239// environment support
240
241bool os::getenv(const char* name, char* buf, int len) {
242  const char* val = ::getenv(name);
243  if (val != NULL && strlen(val) < (size_t)len) {
244    strcpy(buf, val);
245    return true;
246  }
247  if (len > 0) buf[0] = 0;  // return a null string
248  return false;
249}
250
251
252// Return true if user is running as root.
253
254bool os::have_special_privileges() {
255  static bool init = false;
256  static bool privileges = false;
257  if (!init) {
258    privileges = (getuid() != geteuid()) || (getgid() != getegid());
259    init = true;
260  }
261  return privileges;
262}
263
264// Helper function, emulates disclaim64 using multiple 32bit disclaims
265// because we cannot use disclaim64() on AS/400 and old AIX releases.
266static bool my_disclaim64(char* addr, size_t size) {
267
268  if (size == 0) {
269    return true;
270  }
271
272  // Maximum size 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
273  const unsigned int maxDisclaimSize = 0x80000000;
274
275  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
276  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);
277
278  char* p = addr;
279
280  for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
281    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
282      //if (Verbose)
283      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
284      return false;
285    }
286    p += maxDisclaimSize;
287  }
288
289  if (lastDisclaimSize > 0) {
290    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
291      //if (Verbose)
292        fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
293      return false;
294    }
295  }
296
297  return true;
298}
299
// Cpu architecture string, selected at compile time from the target defines.
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif
308
309
310// Given an address, returns the size of the page backing that address.
311size_t os::Aix::query_pagesize(void* addr) {
312
313  vm_page_info pi;
314  pi.addr = (uint64_t)addr;
315  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
316    return pi.pagesize;
317  } else {
318    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
319    assert(false, "vmgetinfo failed to retrieve page size");
320    return SIZE_4K;
321  }
322
323}
324
// Returns the kernel thread id of the currently running thread
// (via thread_self(); distinct from the pthread id).
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}
329
330void os::Aix::initialize_system_info() {
331
332  // get the number of online(logical) cpus instead of configured
333  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
334  assert(_processor_count > 0, "_processor_count must be > 0");
335
336  // retrieve total physical storage
337  os::Aix::meminfo_t mi;
338  if (!os::Aix::get_meminfo(&mi)) {
339    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
340    assert(false, "os::Aix::get_meminfo failed.");
341  }
342  _physical_memory = (julong) mi.real_total;
343}
344
345// Helper function for tracing page sizes.
346static const char* describe_pagesize(size_t pagesize) {
347  switch (pagesize) {
348    case SIZE_4K : return "4K";
349    case SIZE_64K: return "64K";
350    case SIZE_16M: return "16M";
351    case SIZE_16G: return "16G";
352    default:
353      assert(false, "surprise");
354      return "??";
355  }
356}
357
358// Retrieve information about multipage size support. Will initialize
359// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
360// Aix::_can_use_16M_pages.
361// Must be called before calling os::large_page_init().
362void os::Aix::query_multipage_support() {
363
364  guarantee(_page_size == -1 &&
365            _stack_page_size == -1 &&
366            _can_use_64K_pages == -1 &&
367            _can_use_16M_pages == -1 &&
368            g_multipage_error == -1,
369            "do not call twice");
370
371  _page_size = ::sysconf(_SC_PAGESIZE);
372
373  // This really would surprise me.
374  assert(_page_size == SIZE_4K, "surprise!");
375
376
377  // query default data page size (default page size for C-Heap, pthread stacks and .bss).
378  // Default data page size is influenced either by linker options (-bdatapsize)
379  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
380  // default should be 4K.
381  size_t data_page_size = SIZE_4K;
382  {
383    void* p = ::malloc(SIZE_16M);
384    data_page_size = os::Aix::query_pagesize(p);
385    ::free(p);
386  }
387
388  // query default shm page size (LDR_CNTRL SHMPSIZE)
389  {
390    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
391    guarantee(shmid != -1, "shmget failed");
392    void* p = ::shmat(shmid, NULL, 0);
393    ::shmctl(shmid, IPC_RMID, NULL);
394    guarantee(p != (void*) -1, "shmat failed");
395    _shm_default_page_size = os::Aix::query_pagesize(p);
396    ::shmdt(p);
397  }
398
399  // before querying the stack page size, make sure we are not running as primordial
400  // thread (because primordial thread's stack may have different page size than
401  // pthread thread stacks). Running a VM on the primordial thread won't work for a
402  // number of reasons so we may just as well guarantee it here
403  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");
404
405  // query stack page size
406  {
407    int dummy = 0;
408    _stack_page_size = os::Aix::query_pagesize(&dummy);
409    // everything else would surprise me and should be looked into
410    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
411    // also, just for completeness: pthread stacks are allocated from C heap, so
412    // stack page size should be the same as data page size
413    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
414  }
415
416  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
417  // for system V shm.
418  if (Aix::extshm()) {
419    if (Verbose) {
420      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
421                      "Please make sure EXTSHM is OFF for large page support.\n");
422    }
423    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
424    _can_use_64K_pages = _can_use_16M_pages = 0;
425    goto query_multipage_support_end;
426  }
427
428  // now check which page sizes the OS claims it supports, and of those, which actually can be used.
429  {
430    const int MAX_PAGE_SIZES = 4;
431    psize_t sizes[MAX_PAGE_SIZES];
432    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
433    if (num_psizes == -1) {
434      if (Verbose) {
435        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
436        fprintf(stderr, "disabling multipage support.\n");
437      }
438      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
439      _can_use_64K_pages = _can_use_16M_pages = 0;
440      goto query_multipage_support_end;
441    }
442    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
443    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
444    if (Verbose) {
445      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
446      for (int i = 0; i < num_psizes; i ++) {
447        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
448      }
449      fprintf(stderr, " .\n");
450    }
451
452    // Can we use 64K, 16M pages?
453    _can_use_64K_pages = 0;
454    _can_use_16M_pages = 0;
455    for (int i = 0; i < num_psizes; i ++) {
456      if (sizes[i] == SIZE_64K) {
457        _can_use_64K_pages = 1;
458      } else if (sizes[i] == SIZE_16M) {
459        _can_use_16M_pages = 1;
460      }
461    }
462
463    if (!_can_use_64K_pages) {
464      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
465    }
466
467    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
468    // there must be an actual 16M page pool, and we must run with enough rights.
469    if (_can_use_16M_pages) {
470      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
471      guarantee(shmid != -1, "shmget failed");
472      struct shmid_ds shm_buf = { 0 };
473      shm_buf.shm_pagesize = SIZE_16M;
474      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
475      const int en = errno;
476      ::shmctl(shmid, IPC_RMID, NULL);
477      if (!can_set_pagesize) {
478        if (Verbose) {
479          fprintf(stderr, "Failed to allocate even one misely 16M page. shmctl failed with %d (%s).\n"
480                          "Will deactivate 16M support.\n", en, strerror(en));
481        }
482        _can_use_16M_pages = 0;
483      }
484    }
485
486  } // end: check which pages can be used for shared memory
487
488query_multipage_support_end:
489
490  guarantee(_page_size != -1 &&
491            _stack_page_size != -1 &&
492            _can_use_64K_pages != -1 &&
493            _can_use_16M_pages != -1, "Page sizes not properly initialized");
494
495  if (_can_use_64K_pages) {
496    g_multipage_error = 0;
497  }
498
499  if (Verbose) {
500    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
501    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
502    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
503    fprintf(stderr, "Can use 64K pages dynamically with shared meory: %s\n", (_can_use_64K_pages ? "yes" :"no"));
504    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" :"no"));
505    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
506  }
507
508} // end os::Aix::query_multipage_support()
509
510
// The code for this method was initially derived from the version in os_linux.cpp.
// Derives and registers dll_dir, java_home, boot class path, library path,
// extension dirs and endorsed dirs from the location of libjvm.so.
void os::init_system_properties_values() {
  // The next few definitions allow the code to be verbatim:
  // Note: 'malloc' is redefined here to allocate from the VM's tracked C heap
  // (mtInternal); the macro is #undef'd again at the end of this function.
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

  // sysclasspath, java_home, dll_dir
  char *home_path;
  char *dll_path;
  char *pslash;
  char buf[MAXPATHLEN];
  os::jvm_path(buf, sizeof(buf));

  // Found the full path to libjvm.so.
  // Now cut the path to <java_home>/jre if we can.
  *(strrchr(buf, '/')) = '\0'; // get rid of /libjvm.so
  pslash = strrchr(buf, '/');
  if (pslash != NULL) {
    *pslash = '\0';            // get rid of /{client|server|hotspot}
  }

  dll_path = malloc(strlen(buf) + 1);
  strcpy(dll_path, buf);
  Arguments::set_dll_dir(dll_path);

  // Strip two more path components to arrive at <java_home>/jre.
  if (pslash != NULL) {
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';          // get rid of /<arch>
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // get rid of /lib
      }
    }
  }

  home_path = malloc(strlen(buf) + 1);
  strcpy(home_path, buf);
  Arguments::set_java_home(home_path);

  if (!set_boot_path('/', ':')) return;

  // Where to look for native libraries

  // On Aix we get the user setting of LIBPATH
  // Eventually, all the library path setting will be done here.
  char *ld_library_path;

  // Construct the invariant part of ld_library_path.
  ld_library_path = (char *) malloc(sizeof(DEFAULT_LIBPATH));
  sprintf(ld_library_path, DEFAULT_LIBPATH);

  // Get the user setting of LIBPATH and prepend it.
  char *v = ::getenv("LIBPATH");
  if (v == NULL) {
    v = "";
  }

  char *t = ld_library_path;
  // That's +1 for the colon and +1 for the trailing '\0'.
  // (The previous buffer 't' is not freed; this runs once at startup.)
  ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
  sprintf(ld_library_path, "%s:%s", v, t);

  Arguments::set_library_path(ld_library_path);

  // Extensions directories: <java_home>/lib/ext
  char* cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(EXTENSIONS_DIR));
  sprintf(cbuf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(cbuf);

  // Endorsed standards default directory: <java_home>/lib/endorsed
  cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
  sprintf(cbuf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(cbuf);

#undef malloc
#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}
593
594////////////////////////////////////////////////////////////////////////////////
595// breakpoint support
596
void os::breakpoint() {
  // BREAKPOINT is a platform-defined macro; expands to the target's
  // breakpoint/trap mechanism.
  BREAKPOINT;
}

// Convenience hook with C linkage: attach a debugger and set a breakpoint here.
extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}
604
605////////////////////////////////////////////////////////////////////////////////
606// signal support
607
// Signal sets used by the VM; populated once in os::Aix::signal_sets_init().
debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;
610
611bool os::Aix::is_sig_ignored(int sig) {
612  struct sigaction oact;
613  sigaction(sig, (struct sigaction*)NULL, &oact);
614  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
615    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
616  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
617    return true;
618  else
619    return false;
620}
621
622void os::Aix::signal_sets_init() {
623  // Should also have an assertion stating we are still single-threaded.
624  assert(!signal_sets_initialized, "Already initialized");
625  // Fill in signals that are necessarily unblocked for all threads in
626  // the VM. Currently, we unblock the following signals:
627  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
628  //                         by -Xrs (=ReduceSignalUsage));
629  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
630  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
631  // the dispositions or masks wrt these signals.
632  // Programs embedding the VM that want to use the above signals for their
633  // own purposes must, at this time, use the "-Xrs" option to prevent
634  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
635  // (See bug 4345157, and other related bugs).
636  // In reality, though, unblocking these signals is really a nop, since
637  // these signals are not blocked by default.
638  sigemptyset(&unblocked_sigs);
639  sigemptyset(&allowdebug_blocked_sigs);
640  sigaddset(&unblocked_sigs, SIGILL);
641  sigaddset(&unblocked_sigs, SIGSEGV);
642  sigaddset(&unblocked_sigs, SIGBUS);
643  sigaddset(&unblocked_sigs, SIGFPE);
644  sigaddset(&unblocked_sigs, SIGTRAP);
645  sigaddset(&unblocked_sigs, SIGDANGER);
646  sigaddset(&unblocked_sigs, SR_signum);
647
648  if (!ReduceSignalUsage) {
649   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
650     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
651     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
652   }
653   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
654     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
655     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
656   }
657   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
658     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
659     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
660   }
661  }
662  // Fill in signals that are blocked by all but the VM thread.
663  sigemptyset(&vm_sigs);
664  if (!ReduceSignalUsage)
665    sigaddset(&vm_sigs, BREAK_SIGNAL);
666  debug_only(signal_sets_initialized = true);
667}
668
// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
// Returns a pointer to the static set; caller must not modify it.
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}
675
// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
// Returns a pointer to the static set; caller must not modify it.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}
682
// These are signals that are blocked during cond_wait to allow debugger in.
// Returns a pointer to the static set; caller must not modify it.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}
688
689void os::Aix::hotspot_sigmask(Thread* thread) {
690
691  //Save caller's signal mask before setting VM signal mask
692  sigset_t caller_sigmask;
693  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);
694
695  OSThread* osthread = thread->osthread();
696  osthread->set_caller_sigmask(caller_sigmask);
697
698  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);
699
700  if (!ReduceSignalUsage) {
701    if (thread->is_VM_thread()) {
702      // Only the VM thread handles BREAK_SIGNAL ...
703      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
704    } else {
705      // ... all other threads block BREAK_SIGNAL
706      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
707    }
708  }
709}
710
711// retrieve memory information.
712// Returns false if something went wrong;
713// content of pmi undefined in this case.
714bool os::Aix::get_meminfo(meminfo_t* pmi) {
715
716  assert(pmi, "get_meminfo: invalid parameter");
717
718  memset(pmi, 0, sizeof(meminfo_t));
719
720  if (os::Aix::on_pase()) {
721
722    Unimplemented();
723    return false;
724
725  } else {
726
727    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
728    // See:
729    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
730    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
731    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
732    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
733
734    perfstat_memory_total_t psmt;
735    memset (&psmt, '\0', sizeof(psmt));
736    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
737    if (rc == -1) {
738      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
739      assert(0, "perfstat_memory_total() failed");
740      return false;
741    }
742
743    assert(rc == 1, "perfstat_memory_total() - weird return code");
744
745    // excerpt from
746    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
747    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
748    // The fields of perfstat_memory_total_t:
749    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
750    // u_longlong_t real_total         Total real memory (in 4 KB pages).
751    // u_longlong_t real_free          Free real memory (in 4 KB pages).
752    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
753    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
754
755    pmi->virt_total = psmt.virt_total * 4096;
756    pmi->real_total = psmt.real_total * 4096;
757    pmi->real_free = psmt.real_free * 4096;
758    pmi->pgsp_total = psmt.pgsp_total * 4096;
759    pmi->pgsp_free = psmt.pgsp_free * 4096;
760
761    return true;
762
763  }
764} // end os::Aix::get_meminfo
765
766// Retrieve global cpu information.
767// Returns false if something went wrong;
768// the content of pci is undefined in this case.
769bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
770  assert(pci, "get_cpuinfo: invalid parameter");
771  memset(pci, 0, sizeof(cpuinfo_t));
772
773  perfstat_cpu_total_t psct;
774  memset (&psct, '\0', sizeof(psct));
775
776  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
777    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
778    assert(0, "perfstat_cpu_total() failed");
779    return false;
780  }
781
782  // global cpu information
783  strcpy (pci->description, psct.description);
784  pci->processorHZ = psct.processorHZ;
785  pci->ncpus = psct.ncpus;
786  os::Aix::_logical_cpus = psct.ncpus;
787  for (int i = 0; i < 3; i++) {
788    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
789  }
790
791  // get the processor version from _system_configuration
792  switch (_system_configuration.version) {
793  case PV_7:
794    strcpy(pci->version, "Power PC 7");
795    break;
796  case PV_6_1:
797    strcpy(pci->version, "Power PC 6 DD1.x");
798    break;
799  case PV_6:
800    strcpy(pci->version, "Power PC 6");
801    break;
802  case PV_5:
803    strcpy(pci->version, "Power PC 5");
804    break;
805  case PV_5_2:
806    strcpy(pci->version, "Power PC 5_2");
807    break;
808  case PV_5_3:
809    strcpy(pci->version, "Power PC 5_3");
810    break;
811  case PV_5_Compat:
812    strcpy(pci->version, "PV_5_Compat");
813    break;
814  case PV_6_Compat:
815    strcpy(pci->version, "PV_6_Compat");
816    break;
817  case PV_7_Compat:
818    strcpy(pci->version, "PV_7_Compat");
819    break;
820  default:
821    strcpy(pci->version, "unknown");
822  }
823
824  return true;
825
826} //end os::Aix::get_cpuinfo
827
828//////////////////////////////////////////////////////////////////////////////
829// detecting pthread library
830
// Intentionally empty: no pthread library detection is needed here;
// kept for interface symmetry with the other platform ports.
void os::Aix::libpthread_init() {
  return;
}
834
835//////////////////////////////////////////////////////////////////////////////
836// create new thread
837
838// Thread start routine for all newly created threads
// Thread start routine for all newly created threads.
// Runs on the new pthread: records stack bounds, randomizes the frame
// offset, sets up TLS / signal mask / FPU state, then enters thread->run().
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  // alloca() shifts this frame's SP by a pseudo-random, thread-specific amount.
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is kernel thread id (similar to Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}
884
// Create a new OS thread that will execute java_start(thread).
// The pthread is created detached and suspended; it is resumed later via
// os::pd_start_thread(). Returns false (with all partial state undone)
// if the OSThread allocation or pthread_create() fails.
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } //else let thread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  // The attribute object is no longer needed once the thread exists.
  pthread_attr_destroy(&attr);

  if (ret != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}
970
971/////////////////////////////////////////////////////////////////////////////
972// attach existing thread
973
// bootstrap the main thread
// Wraps the primordial (launcher) thread; must run on that very thread.
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}
979
// Wrap an already-running native thread (e.g. the primordial thread or a
// thread attaching via JNI) in an OSThread. Unlike os::create_thread() no
// new pthread is created; the initial state is RUNNABLE.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}
1020
1021void os::pd_start_thread(Thread* thread) {
1022  int status = pthread_continue_np(thread->osthread()->pthread_id());
1023  assert(status == 0, "thr_continue failed");
1024}
1025
// Free OS resources related to the OSThread
// If called on the thread being freed (detach case), also restores the
// signal mask that os::Aix::hotspot_sigmask() saved at attach time.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
   }

  delete osthread;
}
1038
1039//////////////////////////////////////////////////////////////////////////////
1040// thread local storage
1041
1042int os::allocate_thread_local_storage() {
1043  pthread_key_t key;
1044  int rslt = pthread_key_create(&key, NULL);
1045  assert(rslt == 0, "cannot allocate thread local storage");
1046  return (int)key;
1047}
1048
1049// Note: This is currently not used by VM, as we don't destroy TLS key
1050// on VM exit.
1051void os::free_thread_local_storage(int index) {
1052  int rslt = pthread_key_delete((pthread_key_t)index);
1053  assert(rslt == 0, "invalid index");
1054}
1055
1056void os::thread_local_storage_at_put(int index, void* value) {
1057  int rslt = pthread_setspecific((pthread_key_t)index, value);
1058  assert(rslt == 0, "pthread_setspecific failed");
1059}
1060
// C-callable accessor for the current Thread*, read from TLS.
extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}
1064
1065////////////////////////////////////////////////////////////////////////////////
1066// time support
1067
// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  // elapsed_counter() ticks in microseconds; convert to seconds.
  return (double)(os::elapsed_counter()) * 0.000001;
}
1073
1074jlong os::elapsed_counter() {
1075  timeval time;
1076  int status = gettimeofday(&time, NULL);
1077  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
1078}
1079
// Ticks per second of elapsed_counter(): it counts microseconds.
jlong os::elapsed_frequency() {
  return (1000 * 1000);
}
1083
// For now, we say that AIX does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).

bool os::supports_vtime() { return false; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }

// Fallback: report wall-clock elapsed time instead of virtual (CPU) time.
double os::elapsedVTime() {
  // better than nothing, but not much
  return elapsedTime();
}
1094
1095jlong os::javaTimeMillis() {
1096  timeval time;
1097  int status = gettimeofday(&time, NULL);
1098  assert(status != -1, "aix error at gettimeofday()");
1099  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
1100}
1101
1102// We need to manually declare mread_real_time,
1103// because IBM didn't provide a prototype in time.h.
1104// (they probably only ever tested in C, not C++)
1105extern "C"
1106int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
1107
// High-resolution time in nanoseconds. On AIX it is read from the
// processor's real time clock / time base registers; on PASE it is not
// implemented (Unimplemented() is called).
jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  }
  else {
    // On AIX use the precision of processors real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After convertion we have following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonic increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}
1132
// Describe the properties of the timer behind javaTimeNanos() for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  {
    // gettimeofday - based on time in seconds since the Epoch thus does not wrap
    info_ptr->max_value = ALL_64_BITS;

    // gettimeofday is a real time clock so it skips
    // NOTE(review): javaTimeNanos() above actually reads mread_real_time(),
    // not gettimeofday() - confirm whether these skip flags still apply.
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  }

  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}
1145
// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
// Not implemented on AIX yet: Unimplemented() is invoked and, should
// control return, false is reported.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  Unimplemented();
  return false;
}
1154
1155
1156char * os::local_time_string(char *buf, size_t buflen) {
1157  struct tm t;
1158  time_t long_time;
1159  time(&long_time);
1160  localtime_r(&long_time, &t);
1161  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1162               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
1163               t.tm_hour, t.tm_min, t.tm_sec);
1164  return buf;
1165}
1166
// Thread-safe localtime: delegates to the reentrant localtime_r().
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}
1170
1171////////////////////////////////////////////////////////////////////////////////
1172// runtime exit support
1173
1174// Note: os::shutdown() might be called very early during initialization, or
1175// called from signal handler. Before adding something to os::shutdown(), make
1176// sure it is async-safe and can handle partially initialized VM.
1177void os::shutdown() {
1178
1179  // allow PerfMemory to attempt cleanup of any persistent resources
1180  perfMemory_exit();
1181
1182  // needs to remove object in file system
1183  AttachListener::abort();
1184
1185  // flush buffered output, finish log files
1186  ostream_abort();
1187
1188  // Check for abort hook
1189  abort_hook_t abort_hook = Arguments::abort_hook();
1190  if (abort_hook != NULL) {
1191    abort_hook();
1192  }
1193
1194}
1195
// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
// Runs the shutdown sequence, then either dumps core via ::abort() or
// exits with status 1.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  // Not reached when dump_core is true and ::abort() terminated us.
  ::exit(1);
}
1215
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}
1220
// Set the file used for error reporting. Unused on AIX for now.
void os::set_error_file(const char *logfile) {}
1223
1224
1225// This method is a copy of JDK's sysGetLastErrorString
1226// from src/solaris/hpi/src/system_md.c
1227
1228size_t os::lasterror(char *buf, size_t len) {
1229
1230  if (errno == 0)  return 0;
1231
1232  const char *s = ::strerror(errno);
1233  size_t n = ::strlen(s);
1234  if (n >= len) {
1235    n = len - 1;
1236  }
1237  ::strncpy(buf, s, n);
1238  buf[n] = '\0';
1239  return n;
1240}
1241
// The current thread's pthread id, usable as an opaque thread identifier.
intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.
  // (The comments below were inherited from the Linux port.)

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  // _initial_pid is captured at VM start-up; fall back to getpid() if it
  // was never set.
  return (int)(_initial_pid ? _initial_pid : getpid());
}
1258
// DLL functions

// Suffix used when composing shared library file names.
const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }
1266
1267static bool file_exists(const char* filename) {
1268  struct stat statbuf;
1269  if (filename == NULL || strlen(filename) == 0) {
1270    return false;
1271  }
1272  return os::stat(filename, &statbuf) == 0;
1273}
1274
// Compose a platform library file name into 'buffer' from 'pname' (a
// directory, a path-separator-separated list of directories, or empty)
// and 'fname' (the base library name). Returns true if a name was
// composed; with a path list the candidate must also exist on disk.
bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // NOTE(review): if no candidate exists, 'buffer' is left holding the
    // last candidate tried while false is returned - callers must check
    // the return value before using 'buffer'.
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
1319
1320// Check if addr is inside libjvm.so.
1321bool os::address_is_in_vm(address addr) {
1322
1323  // Input could be a real pc or a function pointer literal. The latter
1324  // would be a function descriptor residing in the data segment of a module.
1325
1326  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
1327  if (lib) {
1328    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
1329      return true;
1330    } else {
1331      return false;
1332    }
1333  } else {
1334    lib = LoadedLibraries::find_for_data_address(addr);
1335    if (lib) {
1336      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
1337        return true;
1338      } else {
1339        return false;
1340      }
1341    } else {
1342      return false;
1343    }
1344  }
1345}
1346
// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // its a real code pointer
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      // The first slot of an AIX function descriptor is the code entry point.
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // Its a function descriptor
        return code_entry;
      }
    }
  }
  return NULL;
}
1373
// Resolve 'addr' to the name (and offset) of the containing function.
// Output parameters are cleared first; returns false if addr is neither a
// code pointer nor a resolvable function descriptor.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset);
}
1392
1393static int getModuleName(codeptr_t pc,                    // [in] program counter
1394                         char* p_name, size_t namelen,    // [out] optional: function name
1395                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
1396                         ) {
1397
1398  // initialize output parameters
1399  if (p_name && namelen > 0) {
1400    *p_name = '\0';
1401  }
1402  if (p_errmsg && errmsglen > 0) {
1403    *p_errmsg = '\0';
1404  }
1405
1406  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
1407  if (lib) {
1408    if (p_name && namelen > 0) {
1409      sprintf(p_name, "%.*s", namelen, lib->get_shortname());
1410    }
1411    return 0;
1412  }
1413
1414  if (Verbose) {
1415    fprintf(stderr, "pc outside any module");
1416  }
1417
1418  return -1;
1419
1420}
1421
// Resolve 'addr' to the short name of the library containing it.
// Output parameters are cleared first; returns false if addr is neither a
// code pointer nor a resolvable function descriptor, or the module is
// unknown. Note: 'offset' is currently always left at -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
      buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}
1442
1443// Loads .dll/.so and in case of error it checks if .dll/.so was built
1444// for the same architecture as Hotspot is running on
1445void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
1446
1447  if (ebuf && ebuflen > 0) {
1448    ebuf[0] = '\0';
1449    ebuf[ebuflen - 1] = '\0';
1450  }
1451
1452  if (!filename || strlen(filename) == 0) {
1453    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
1454    return NULL;
1455  }
1456
1457  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
1458  void * result= ::dlopen(filename, RTLD_LAZY);
1459  if (result != NULL) {
1460    // Reload dll cache. Don't do this in signal handling.
1461    LoadedLibraries::reload();
1462    return result;
1463  } else {
1464    // error analysis when dlopen fails
1465    const char* const error_report = ::dlerror();
1466    if (error_report && ebuf && ebuflen > 0) {
1467      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
1468               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
1469    }
1470  }
1471  return NULL;
1472}
1473
// Look up 'name' in the dl handle. dlsym() is not guaranteed to be
// MT-safe everywhere (the original comment cited glibc-2.0), so lookups
// are serialized with dl_mutex (defined elsewhere in this file).
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}
1483
// Print the list of loaded shared libraries (from the LoadedLibraries
// cache) to 'st'; used by error reporting.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}
1488
1489void os::print_os_info(outputStream* st) {
1490  st->print("OS:");
1491
1492  st->print("uname:");
1493  struct utsname name;
1494  uname(&name);
1495  st->print(name.sysname); st->print(" ");
1496  st->print(name.nodename); st->print(" ");
1497  st->print(name.release); st->print(" ");
1498  st->print(name.version); st->print(" ");
1499  st->print(name.machine);
1500  st->cr();
1501
1502  // rlimit
1503  st->print("rlimit:");
1504  struct rlimit rlim;
1505
1506  st->print(" STACK ");
1507  getrlimit(RLIMIT_STACK, &rlim);
1508  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1509  else st->print("%uk", rlim.rlim_cur >> 10);
1510
1511  st->print(", CORE ");
1512  getrlimit(RLIMIT_CORE, &rlim);
1513  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1514  else st->print("%uk", rlim.rlim_cur >> 10);
1515
1516  st->print(", NPROC ");
1517  st->print("%d", sysconf(_SC_CHILD_MAX));
1518
1519  st->print(", NOFILE ");
1520  getrlimit(RLIMIT_NOFILE, &rlim);
1521  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1522  else st->print("%d", rlim.rlim_cur);
1523
1524  st->print(", AS ");
1525  getrlimit(RLIMIT_AS, &rlim);
1526  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1527  else st->print("%uk", rlim.rlim_cur >> 10);
1528
1529  // Print limits on DATA, because it limits the C-heap.
1530  st->print(", DATA ");
1531  getrlimit(RLIMIT_DATA, &rlim);
1532  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
1533  else st->print("%uk", rlim.rlim_cur >> 10);
1534  st->cr();
1535
1536  // load average
1537  st->print("load average:");
1538  double loadavg[3] = {-1.L, -1.L, -1.L};
1539  os::loadavg(loadavg, 3);
1540  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
1541  st->cr();
1542}
1543
// Print page-size configuration, the relevant environment variables and
// physical/swap statistics to 'st'; used by error reporting.
void os::print_memory_info(outputStream* st) {

  st->print_cr("Memory:");

  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
  st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" :"no"));
  st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" :"no"));
  if (g_multipage_error != 0) {
    st->print_cr("  multipage error: %d", g_multipage_error);
  }

  // print out LDR_CNTRL because it affects the default page sizes
  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");

  // EXTSHM influences how shared memory segments can be used.
  const char* const extshm = ::getenv("EXTSHM");
  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");

  // Call os::Aix::get_meminfo() to retrieve memory statistics.
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    char buffer[256];
    if (os::Aix::on_aix()) {
      jio_snprintf(buffer, sizeof(buffer),
                   "  physical total : %llu\n"
                   "  physical free  : %llu\n"
                   "  swap total     : %llu\n"
                   "  swap free      : %llu\n",
                   mi.real_total,
                   mi.real_free,
                   mi.pgsp_total,
                   mi.pgsp_free);
    } else {
      // PASE reporting is not implemented.
      Unimplemented();
    }
    st->print_raw(buffer);
  } else {
    st->print_cr("  (no more information available)");
  }
}
1586
// Print CPU count and feature string to 'st'; used by error reporting.
void os::pd_print_cpu_info(outputStream* st) {
  // cpu
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::cpu_features());
  st->cr();
}
1596
// Print a short description of the given siginfo_t to 'st'.
void os::print_siginfo(outputStream* st, void* siginfo) {
  // Use common posix version.
  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
  st->cr();
}
1602
1603
1604static void print_signal_handler(outputStream* st, int sig,
1605                                 char* buf, size_t buflen);
1606
1607void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1608  st->print_cr("Signal Handlers:");
1609  print_signal_handler(st, SIGSEGV, buf, buflen);
1610  print_signal_handler(st, SIGBUS , buf, buflen);
1611  print_signal_handler(st, SIGFPE , buf, buflen);
1612  print_signal_handler(st, SIGPIPE, buf, buflen);
1613  print_signal_handler(st, SIGXFSZ, buf, buflen);
1614  print_signal_handler(st, SIGILL , buf, buflen);
1615  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
1616  print_signal_handler(st, SR_signum, buf, buflen);
1617  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
1618  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
1619  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
1620  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
1621  print_signal_handler(st, SIGTRAP, buf, buflen);
1622  print_signal_handler(st, SIGDANGER, buf, buflen);
1623}
1624
// Cache for the resolved libjvm path; filled on the first os::jvm_path() call.
static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so or libjvm_g.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  // Locate the shared object that contains this very function, then
  // canonicalize its file name into 'buf'.
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = realpath((char *)dlinfo.dli_fname, buf);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  strcpy(saved_jvm_path, buf);
}
1649
// AIX uses plain C symbol names for JNI: no prefix is needed.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

// Likewise, no suffix (such as "@<args_size>") is appended.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}
1657
1658////////////////////////////////////////////////////////////////////////////////
1659// sun.misc.Signal support
1660
// Counts SIGINT deliveries so that only the first one is forwarded.
static volatile jint sigint_count = 0;

// Handler installed for signals claimed via sun.misc.Signal; forwards the
// signal number to the VM's signal dispatcher thread.
static void
UserHandler(int sig, void *siginfo, void *context) {
  // 4511530 - sem_post is serialized and handled by the manager thread. When
  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  // don't want to flood the manager thread with sem_post requests.
  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
    return;

  // Ctrl-C is pressed during error reporting, likely because the error
  // handler fails to abort. Let VM die immediately.
  if (sig == SIGINT && is_error_reported()) {
    os::die();
  }

  os::signal_notify(sig);
}
1679
// Address of the UserHandler above, used to recognize our own handler
// when signal dispositions are inspected.
void* os::user_handler() {
  return CAST_FROM_FN_PTR(void*, UserHandler);
}
1683
extern "C" {
  // Signatures of the two sigaction handler styles (sa_handler / sa_sigaction).
  typedef void (*sa_handler_t)(int);
  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
}
1688
// Install 'handler' for 'signal_number' via sigaction() and return the
// previously installed handler, or (void*)-1 if registration failed.
void* os::signal(int signal_number, void* handler) {
  struct sigaction sigAct, oldSigAct;

  // Start from "block everything while handling", then carve out the
  // synchronous signals below.
  sigfillset(&(sigAct.sa_mask));

  // Do not block out synchronous signals in the signal handler.
  // Blocking synchronous signals only makes sense if you can really
  // be sure that those signals won't happen during signal handling,
  // when the blocking applies.  Normal signal handlers are lean and
  // do not cause signals. But our signal handlers tend to be "risky"
  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
  // On AIX, PASE there was a case where a SIGSEGV happened, followed
  // by a SIGILL, which was blocked due to the signal mask. The process
  // just hung forever. Better to crash from a secondary signal than to hang.
  sigdelset(&(sigAct.sa_mask), SIGSEGV);
  sigdelset(&(sigAct.sa_mask), SIGBUS);
  sigdelset(&(sigAct.sa_mask), SIGILL);
  sigdelset(&(sigAct.sa_mask), SIGFPE);
  sigdelset(&(sigAct.sa_mask), SIGTRAP);

  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;

  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);

  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
    // -1 means registration failed
    return (void *)-1;
  }

  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
}
1720
// Send 'signal_number' to the current process via raise().
void os::signal_raise(int signal_number) {
  ::raise(signal_number);
}
1724
1725//
1726// The following code is moved from os.cpp for making this
1727// code platform specific, which it is by its very nature.
1728//
1729
// Will be modified when max signal is changed to be dynamic
// Pseudo-signal number used to tell the dispatcher thread to exit.
int os::sigexitnum_pd() {
  return NSIG;
}
1734
// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };

// POSIX hand-shaking semaphore between signal_notify() and signal_wait()
// (the comment used to say "Linux" - this is the AIX port).
static sem_t sig_sem;

void os::signal_init_pd() {
  // Initialize signal structures
  ::memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  int rc = ::sem_init(&sig_sem, 0, 0);
  guarantee(rc != -1, "sem_init failed");
}
1749
// Record a delivery of 'sig' and wake up os::signal_wait(); called from
// signal handlers, so only async-safe operations are used.
void os::signal_notify(int sig) {
  Atomic::inc(&pending_signals[sig]);
  ::sem_post(&sig_sem);
}
1754
// Scan pending_signals[] for a recorded signal and return its number,
// atomically decrementing its counter. If 'wait' is false, return -1 when
// nothing is pending; if true, block on sig_sem (cooperating with the
// external thread-suspension protocol) until a signal arrives.
static int check_pending_signals(bool wait) {
  Atomic::store(0, &sigint_count);
  for (;;) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg claims one pending delivery of signal i for this caller.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait) {
      return -1;
    }
    JavaThread *thread = JavaThread::current();
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

      ::sem_wait(&sig_sem);

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ::sem_post(&sig_sem);

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
1793
// Non-blocking check for a pending signal; returns -1 if none is pending.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal arrives, then return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}
1801
1802////////////////////////////////////////////////////////////////////////////////
1803// Virtual Memory
1804
1805// AddrRange describes an immutable address range
1806//
1807// This is a helper class for the 'shared memory bookkeeping' below.
1808class AddrRange {
1809  friend class ShmBkBlock;
1810
1811  char* _start;
1812  size_t _size;
1813
1814public:
1815
1816  AddrRange(char* start, size_t size)
1817    : _start(start), _size(size)
1818  {}
1819
1820  AddrRange(const AddrRange& r)
1821    : _start(r.start()), _size(r.size())
1822  {}
1823
1824  char* start() const { return _start; }
1825  size_t size() const { return _size; }
1826  char* end() const { return _start + _size; }
1827  bool is_empty() const { return _size == 0 ? true : false; }
1828
1829  static AddrRange empty_range() { return AddrRange(NULL, 0); }
1830
1831  bool contains(const char* p) const {
1832    return start() <= p && end() > p;
1833  }
1834
1835  bool contains(const AddrRange& range) const {
1836    return start() <= range.start() && end() >= range.end();
1837  }
1838
1839  bool intersects(const AddrRange& range) const {
1840    return (range.start() <= start() && range.end() > start()) ||
1841           (range.start() < end() && range.end() >= end()) ||
1842           contains(range);
1843  }
1844
1845  bool is_same_range(const AddrRange& range) const {
1846    return start() == range.start() && size() == range.size();
1847  }
1848
1849  // return the closest inside range consisting of whole pages
1850  AddrRange find_closest_aligned_range(size_t pagesize) const {
1851    if (pagesize == 0 || is_empty()) {
1852      return empty_range();
1853    }
1854    char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
1855    char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
1856    if (from > to) {
1857      return empty_range();
1858    }
1859    return AddrRange(from, to - from);
1860  }
1861};
1862
1863////////////////////////////////////////////////////////////////////////////
1864// shared memory bookkeeping
1865//
1866// the os::reserve_memory() API and friends hand out different kind of memory, depending
1867// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
1868//
1869// But these memory types have to be treated differently. For example, to uncommit
1870// mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
1871// disclaim64() is needed.
1872//
1873// Therefore we need to keep track of the allocated memory segments and their
1874// properties.
1875
1876// ShmBkBlock: base class for all blocks in the shared memory bookkeeping
1877class ShmBkBlock {
1878
1879  ShmBkBlock* _next;
1880
1881protected:
1882
1883  AddrRange _range;
1884  const size_t _pagesize;
1885  const bool _pinned;
1886
1887public:
1888
1889  ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
1890    : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
1891
1892    assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
1893    assert(!_range.is_empty(), "invalid range");
1894  }
1895
1896  virtual void print(outputStream* st) const {
1897    st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
1898              _range.start(), _range.end(), _range.size(),
1899              _range.size() / _pagesize, describe_pagesize(_pagesize),
1900              _pinned ? "pinned" : "");
1901  }
1902
1903  enum Type { MMAP, SHMAT };
1904  virtual Type getType() = 0;
1905
1906  char* base() const { return _range.start(); }
1907  size_t size() const { return _range.size(); }
1908
1909  void setAddrRange(AddrRange range) {
1910    _range = range;
1911  }
1912
1913  bool containsAddress(const char* p) const {
1914    return _range.contains(p);
1915  }
1916
1917  bool containsRange(const char* p, size_t size) const {
1918    return _range.contains(AddrRange((char*)p, size));
1919  }
1920
1921  bool isSameRange(const char* p, size_t size) const {
1922    return _range.is_same_range(AddrRange((char*)p, size));
1923  }
1924
1925  virtual bool disclaim(char* p, size_t size) = 0;
1926  virtual bool release() = 0;
1927
1928  // blocks live in a list.
1929  ShmBkBlock* next() const { return _next; }
1930  void set_next(ShmBkBlock* blk) { _next = blk; }
1931
1932}; // end: ShmBkBlock
1933
1934
1935// ShmBkMappedBlock: describes an block allocated with mmap()
1936class ShmBkMappedBlock : public ShmBkBlock {
1937public:
1938
1939  ShmBkMappedBlock(AddrRange range)
1940    : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
1941
1942  void print(outputStream* st) const {
1943    ShmBkBlock::print(st);
1944    st->print_cr(" - mmap'ed");
1945  }
1946
1947  Type getType() {
1948    return MMAP;
1949  }
1950
1951  bool disclaim(char* p, size_t size) {
1952
1953    AddrRange r(p, size);
1954
1955    guarantee(_range.contains(r), "invalid disclaim");
1956
1957    // only disclaim whole ranges.
1958    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
1959    if (r2.is_empty()) {
1960      return true;
1961    }
1962
1963    const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
1964
1965    if (rc != 0) {
1966      warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
1967    }
1968
1969    return rc == 0 ? true : false;
1970  }
1971
1972  bool release() {
1973    // mmap'ed blocks are released using munmap
1974    if (::munmap(_range.start(), _range.size()) != 0) {
1975      warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
1976      return false;
1977    }
1978    return true;
1979  }
1980}; // end: ShmBkMappedBlock
1981
1982// ShmBkShmatedBlock: describes an block allocated with shmget/shmat()
1983class ShmBkShmatedBlock : public ShmBkBlock {
1984public:
1985
1986  ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
1987    : ShmBkBlock(range, pagesize, pinned) {}
1988
1989  void print(outputStream* st) const {
1990    ShmBkBlock::print(st);
1991    st->print_cr(" - shmat'ed");
1992  }
1993
1994  Type getType() {
1995    return SHMAT;
1996  }
1997
1998  bool disclaim(char* p, size_t size) {
1999
2000    AddrRange r(p, size);
2001
2002    if (_pinned) {
2003      return true;
2004    }
2005
2006    // shmat'ed blocks are disclaimed using disclaim64
2007    guarantee(_range.contains(r), "invalid disclaim");
2008
2009    // only disclaim whole ranges.
2010    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
2011    if (r2.is_empty()) {
2012      return true;
2013    }
2014
2015    const bool rc = my_disclaim64(r2.start(), r2.size());
2016
2017    if (Verbose && !rc) {
2018      warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
2019    }
2020
2021    return rc;
2022  }
2023
2024  bool release() {
2025    bool rc = false;
2026    if (::shmdt(_range.start()) != 0) {
2027      warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
2028    } else {
2029      rc = true;
2030    }
2031    return rc;
2032  }
2033
2034}; // end: ShmBkShmatedBlock
2035
// Head of the singly linked list holding all registered shared memory blocks.
static ShmBkBlock* g_shmbk_list = NULL;
// NOTE(review): this lock word appears unused in the visible code - the list
// is protected via ThreadCritical in LOCK_SHMBK/UNLOCK_SHMBK instead; confirm
// before removing.
static volatile jint g_shmbk_table_lock = 0;

// keep some usage statistics
static struct {
  int nodes;    // number of nodes in list
  size_t bytes; // reserved - not committed - bytes.
  int reserves; // how often reserve was called
  int lookups;  // how often a lookup was made
} g_shmbk_stats = { 0, 0, 0, 0 };
2046
2047// add information about a shared memory segment to the bookkeeping
2048static void shmbk_register(ShmBkBlock* p_block) {
2049  guarantee(p_block, "logic error");
2050  p_block->set_next(g_shmbk_list);
2051  g_shmbk_list = p_block;
2052  g_shmbk_stats.reserves ++;
2053  g_shmbk_stats.bytes += p_block->size();
2054  g_shmbk_stats.nodes ++;
2055}
2056
2057// remove information about a shared memory segment by its starting address
2058static void shmbk_unregister(ShmBkBlock* p_block) {
2059  ShmBkBlock* p = g_shmbk_list;
2060  ShmBkBlock* prev = NULL;
2061  while (p) {
2062    if (p == p_block) {
2063      if (prev) {
2064        prev->set_next(p->next());
2065      } else {
2066        g_shmbk_list = p->next();
2067      }
2068      g_shmbk_stats.nodes --;
2069      g_shmbk_stats.bytes -= p->size();
2070      return;
2071    }
2072    prev = p;
2073    p = p->next();
2074  }
2075  assert(false, "should not happen");
2076}
2077
2078// given a pointer, return shared memory bookkeeping record for the segment it points into
2079// using the returned block info must happen under lock protection
2080static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
2081  g_shmbk_stats.lookups ++;
2082  ShmBkBlock* p = g_shmbk_list;
2083  while (p) {
2084    if (p->containsAddress(addr)) {
2085      return p;
2086    }
2087    p = p->next();
2088  }
2089  return NULL;
2090}
2091
2092// dump all information about all memory segments allocated with os::reserve_memory()
2093void shmbk_dump_info() {
2094  tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
2095    "total reserves: %d total lookups: %d)",
2096    g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
2097  const ShmBkBlock* p = g_shmbk_list;
2098  int i = 0;
2099  while (p) {
2100    p->print(tty);
2101    p = p->next();
2102    i ++;
2103  }
2104}
2105
// Scope macros guarding the shared memory bookkeeping. LOCK_SHMBK opens a
// block containing a ThreadCritical section; UNLOCK_SHMBK closes it. An early
// return inside the section is safe (ThreadCritical unlocks in its
// destructor) but skips any code between the return and UNLOCK_SHMBK.
#define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
#define UNLOCK_SHMBK   }
2108
2109// End: shared memory bookkeeping
2110////////////////////////////////////////////////////////////////////////////////////////////////////
2111
// Base page size of the VM process, as determined during os::init.
int os::vm_page_size() {
  // Seems redundant as all get out
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}

// Aix allocates memory by pages.
// Allocation granularity equals the base page size on AIX.
int os::vm_allocation_granularity() {
  assert(os::Aix::page_size() != -1, "must call os::init");
  return os::Aix::page_size();
}
2123
// Commit previously reserved memory. Returns 0 on success. On AIX this is a
// no-op: memory is committed lazily when first touched; 'exec' is ignored.
// In debug builds the range is validated against the shm bookkeeping.
int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {

  // Commit is a noop. There is no explicit commit
  // needed on AIX. Memory is committed when touched.
  //
  // Debug : check address range for validity
#ifdef ASSERT
  LOCK_SHMBK
    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
    if (!block) {
      fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
      shmbk_dump_info();
      assert(false, "invalid pointer");
      // NOTE(review): 'return false' converts to 0 - the success code of this
      // int-returning function. Only reachable in debug builds after the
      // assert has already fired; confirm intent before relying on it.
      return false;
    } else if (!block->containsRange(addr, size)) {
      fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
      shmbk_dump_info();
      assert(false, "invalid range");
      return false;
    }
  UNLOCK_SHMBK
#endif // ASSERT

  return 0;
}
2149
// Commit [addr, addr+size); thin wrapper over commit_memory_impl (a no-op on
// AIX, see above).
bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  return os::Aix::commit_memory_impl(addr, size, exec) == 0;
}

// Commit memory or exit with 'mesg'. Since commit is a no-op on AIX and
// commit_memory_impl returns 0, no failure/exit path is needed here.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  os::Aix::commit_memory_impl(addr, size, exec);
}

// Overload taking an alignment hint; the hint is ignored on AIX.
int os::Aix::commit_memory_impl(char* addr, size_t size,
                                size_t alignment_hint, bool exec) {
  return os::Aix::commit_memory_impl(addr, size, exec);
}

// Hinted variant of pd_commit_memory; forwards to the hinted impl above.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
}

// Hinted commit-or-exit variant; like the unhinted one, cannot fail on AIX.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
}
2175
// Uncommit (disclaim) the given range. The range must lie within a segment
// registered in the shared memory bookkeeping; the block object chooses the
// right mechanism (msync(MS_INVALIDATE) for mmap, disclaim64 for shmat).
bool os::pd_uncommit_memory(char* addr, size_t size) {

  // Delegate to ShmBkBlock class which knows how to uncommit its memory.

  bool rc = false;
  LOCK_SHMBK
    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
    if (!block) {
      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
      shmbk_dump_info();
      assert(false, "invalid pointer");
      return false;
    } else if (!block->containsRange(addr, size)) {
      fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
      shmbk_dump_info();
      assert(false, "invalid range");
      return false;
    }
    rc = block->disclaim(addr, size);
  UNLOCK_SHMBK

  if (Verbose && !rc) {
    warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
  }
  return rc;
}
2202
// Create stack guard pages by revoking all access to the range.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::guard_memory(addr, size);
}

// Remove stack guard pages by restoring full access to the range.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::unguard_memory(addr, size);
}
2210
// No-op on AIX: no large-page realignment support implemented here.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

// No-op on AIX: this hint does not return memory to the OS.
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
}

// NUMA interleaving/placement is not implemented on AIX; no-op.
void os::numa_make_global(char *addr, size_t bytes) {
}

void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
}

// No NUMA topology tracking; the topology never "changes".
bool os::numa_topology_changed() {
  return false;
}
2226
// A single NUMA group is reported (NUMA support not implemented on AIX).
size_t os::numa_get_groups_num() {
  return 1;
}

int os::numa_get_group_id() {
  return 0;
}

// Report the single pseudo-group (id 0) if the caller provided room for it;
// returns the number of ids written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (size > 0) {
    ids[0] = 0;
    return 1;
  }
  return 0;
}

// Per-page information queries are not supported on AIX.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// Not supported: report the whole range as scanned by returning 'end'.
char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}
2250
// Flags for reserve_shmatted_memory:
#define RESSHM_WISHADDR_OR_FAIL                     1
#define RESSHM_TRY_16M_PAGES                        2
#define RESSHM_16M_PAGES_OR_FAIL                    4

// Result of reserve_shmatted_memory:
struct shmatted_memory_info_t {
  char* addr;      // start address of the attached segment
  size_t pagesize; // actual page size backing the segment
  bool pinned;     // true if backed by 16M (pinned) pages
};
2262
2263// Reserve a section of shmatted memory.
2264// params:
2265// bytes [in]: size of memory, in bytes
2266// requested_addr [in]: wish address.
2267//                      NULL = no wish.
2268//                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
2269//                      be obtained, function will fail. Otherwise wish address is treated as hint and
2270//                      another pointer is returned.
2271// flags [in]:          some flags. Valid flags are:
2272//                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
2273//                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
2274//                          (requires UseLargePages and Use16MPages)
2275//                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
2276//                          Otherwise any other page size will do.
2277// p_info [out] :       holds information about the created shared memory segment.
2278static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
2279
2280  assert(p_info, "parameter error");
2281
2282  // init output struct.
2283  p_info->addr = NULL;
2284
2285  // neither should we be here for EXTSHM=ON.
2286  if (os::Aix::extshm()) {
2287    ShouldNotReachHere();
2288  }
2289
2290  // extract flags. sanity checks.
2291  const bool wishaddr_or_fail =
2292    flags & RESSHM_WISHADDR_OR_FAIL;
2293  const bool try_16M_pages =
2294    flags & RESSHM_TRY_16M_PAGES;
2295  const bool f16M_pages_or_fail =
2296    flags & RESSHM_16M_PAGES_OR_FAIL;
2297
2298  // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
2299  // shmat will fail anyway, so save some cycles by failing right away
2300  if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M == 0)) {
2301    if (wishaddr_or_fail) {
2302      return false;
2303    } else {
2304      requested_addr = NULL;
2305    }
2306  }
2307
2308  char* addr = NULL;
2309
2310  // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
2311  // pagesize dynamically.
2312  const size_t size = align_size_up(bytes, SIZE_16M);
2313
2314  // reserve the shared segment
2315  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2316  if (shmid == -1) {
2317    warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
2318    return false;
2319  }
2320
2321  // Important note:
2322  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2323  // We must right after attaching it remove it from the system. System V shm segments are global and
2324  // survive the process.
2325  // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
2326
2327  // try forcing the page size
2328  size_t pagesize = -1; // unknown so far
2329
2330  if (UseLargePages) {
2331
2332    struct shmid_ds shmbuf;
2333    memset(&shmbuf, 0, sizeof(shmbuf));
2334
2335    // First, try to take from 16M page pool if...
2336    if (os::Aix::can_use_16M_pages()  // we can ...
2337        && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
2338        && try_16M_pages) {           // caller wants us to.
2339      shmbuf.shm_pagesize = SIZE_16M;
2340      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2341        pagesize = SIZE_16M;
2342      } else {
2343        warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
2344                size / SIZE_16M, errno);
2345        if (f16M_pages_or_fail) {
2346          goto cleanup_shm;
2347        }
2348      }
2349    }
2350
2351    // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
2352    // because the 64K page pool may also be exhausted.
2353    if (pagesize == -1) {
2354      shmbuf.shm_pagesize = SIZE_64K;
2355      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2356        pagesize = SIZE_64K;
2357      } else {
2358        warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
2359                size / SIZE_64K, errno);
2360        // here I give up. leave page_size -1 - later, after attaching, we will query the
2361        // real page size of the attached memory. (in theory, it may be something different
2362        // from 4K if LDR_CNTRL SHM_PSIZE is set)
2363      }
2364    }
2365  }
2366
2367  // sanity point
2368  assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
2369
2370  // Now attach the shared segment.
2371  addr = (char*) shmat(shmid, requested_addr, 0);
2372  if (addr == (char*)-1) {
2373    // How to handle attach failure:
2374    // If it failed for a specific wish address, tolerate this: in that case, if wish address was
2375    // mandatory, fail, if not, retry anywhere.
2376    // If it failed for any other reason, treat that as fatal error.
2377    addr = NULL;
2378    if (requested_addr) {
2379      if (wishaddr_or_fail) {
2380        goto cleanup_shm;
2381      } else {
2382        addr = (char*) shmat(shmid, NULL, 0);
2383        if (addr == (char*)-1) { // fatal
2384          addr = NULL;
2385          warning("shmat failed (errno: %d)", errno);
2386          goto cleanup_shm;
2387        }
2388      }
2389    } else { // fatal
2390      addr = NULL;
2391      warning("shmat failed (errno: %d)", errno);
2392      goto cleanup_shm;
2393    }
2394  }
2395
2396  // sanity point
2397  assert(addr && addr != (char*) -1, "wrong address");
2398
2399  // after successful Attach remove the segment - right away.
2400  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2401    warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2402    guarantee(false, "failed to remove shared memory segment!");
2403  }
2404  shmid = -1;
2405
2406  // query the real page size. In case setting the page size did not work (see above), the system
2407  // may have given us something other then 4K (LDR_CNTRL)
2408  {
2409    const size_t real_pagesize = os::Aix::query_pagesize(addr);
2410    if (pagesize != -1) {
2411      assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
2412    } else {
2413      pagesize = real_pagesize;
2414    }
2415  }
2416
2417  // Now register the reserved block with internal book keeping.
2418  LOCK_SHMBK
2419    const bool pinned = pagesize >= SIZE_16M ? true : false;
2420    ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
2421    assert(p_block, "");
2422    shmbk_register(p_block);
2423  UNLOCK_SHMBK
2424
2425cleanup_shm:
2426
2427  // if we have not done so yet, remove the shared memory segment. This is very important.
2428  if (shmid != -1) {
2429    if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2430      warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2431      guarantee(false, "failed to remove shared memory segment!");
2432    }
2433    shmid = -1;
2434  }
2435
2436  // trace
2437  if (Verbose && !addr) {
2438    if (requested_addr != NULL) {
2439      warning("failed to shm-allocate 0x%llX bytes at with address 0x%p.", size, requested_addr);
2440    } else {
2441      warning("failed to shm-allocate 0x%llX bytes at any address.", size);
2442    }
2443  }
2444
2445  // hand info to caller
2446  if (addr) {
2447    p_info->addr = addr;
2448    p_info->pagesize = pagesize;
2449    p_info->pinned = pagesize == SIZE_16M ? true : false;
2450  }
2451
2452  // sanity test:
2453  if (requested_addr && addr && wishaddr_or_fail) {
2454    guarantee(addr == requested_addr, "shmat error");
2455  }
2456
2457  // just one more test to really make sure we have no dangling shm segments.
2458  guarantee(shmid == -1, "dangling shm segments");
2459
2460  return addr ? true : false;
2461
2462} // end: reserve_shmatted_memory
2463
// Reserve memory using mmap. Behaves the same as reserve_shmatted_memory():
// will return NULL in case of an error.
// bytes [in]:          amount to reserve; rounded up to whole 4K pages.
// requested_addr [in]: wish address (must be page aligned) or NULL.
// On success the segment is registered with the bookkeeping as a
// ShmBkMappedBlock.
static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {

  // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
  if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
    warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
    return NULL;
  }

  const size_t size = align_size_up(bytes, SIZE_4K);

  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
  // msync(MS_INVALIDATE) (see os::uncommit_memory)
  int flags = MAP_ANONYMOUS | MAP_SHARED;

  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
  // it means if wishaddress is given but MAP_FIXED is not set.
  //
  // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
  // clobbers the address range, which is probably not what the caller wants. That's
  // why I assert here (again) that the SPEC1170 compat mode is off.
  // If we want to be able to run under SPEC1170, we have to do some porting and
  // testing.
  if (requested_addr != NULL) {
    assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
    flags |= MAP_FIXED;
  }

  char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);

  if (addr == MAP_FAILED) {
    // attach failed: tolerate for specific wish addresses. Not being able to attach
    // anywhere is a fatal error.
    if (requested_addr == NULL) {
      // It's ok to fail here if the machine has not enough memory.
      warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
    }
    addr = NULL;
    goto cleanup_mmap;
  }

  // If we did request a specific address and that address was not available, fail.
  // (With MAP_FIXED set above, mmap either maps at requested_addr or fails.)
  if (addr && requested_addr) {
    guarantee(addr == requested_addr, "unexpected");
  }

  // register this mmap'ed segment with book keeping
  LOCK_SHMBK
    ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
    assert(p_block, "");
    shmbk_register(p_block);
  UNLOCK_SHMBK

cleanup_mmap:

  // Trace the outcome.
  if (addr) {
    if (Verbose) {
      fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
    }
  }
  else {
    if (requested_addr != NULL) {
      warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
    } else {
      warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
    }
  }

  return addr;

} // end: reserve_mmaped_memory
2536
// Reserves and attaches a shared memory segment.
// Will assert if a wish address is given and could not be obtained.
// Note: alignment_hint is ignored here; selection between mmap and shmat
// happens inside attempt_reserve_memory_at.
char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  return os::attempt_reserve_memory_at(bytes, requested_addr);
}
2542
2543bool os::pd_release_memory(char* addr, size_t size) {
2544
2545  // delegate to ShmBkBlock class which knows how to uncommit its memory.
2546
2547  bool rc = false;
2548  LOCK_SHMBK
2549    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2550    if (!block) {
2551      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2552      shmbk_dump_info();
2553      assert(false, "invalid pointer");
2554      return false;
2555    }
2556    else if (!block->isSameRange(addr, size)) {
2557      if (block->getType() == ShmBkBlock::MMAP) {
2558        // Release only the same range or a the beginning or the end of a range.
2559        if (block->base() == addr && size < block->size()) {
2560          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
2561          assert(b, "");
2562          shmbk_register(b);
2563          block->setAddrRange(AddrRange(addr, size));
2564        }
2565        else if (addr > block->base() && addr + size == block->base() + block->size()) {
2566          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
2567          assert(b, "");
2568          shmbk_register(b);
2569          block->setAddrRange(AddrRange(addr, size));
2570        }
2571        else {
2572          fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
2573          shmbk_dump_info();
2574          assert(false, "invalid mmap range");
2575          return false;
2576        }
2577      }
2578      else {
2579        // Release only the same range. No partial release allowed.
2580        // Soften the requirement a bit, because the user may think he owns a smaller size
2581        // than the block is due to alignment etc.
2582        if (block->base() != addr || block->size() < size) {
2583          fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
2584          shmbk_dump_info();
2585          assert(false, "invalid shmget range");
2586          return false;
2587        }
2588      }
2589    }
2590    rc = block->release();
2591    assert(rc, "release failed");
2592    // remove block from bookkeeping
2593    shmbk_unregister(block);
2594    delete block;
2595  UNLOCK_SHMBK
2596
2597  if (!rc) {
2598    warning("failed to released %lu bytes at 0x%p", size, addr);
2599  }
2600
2601  return rc;
2602}
2603
2604static bool checked_mprotect(char* addr, size_t size, int prot) {
2605
2606  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2607  // not tell me if protection failed when trying to protect an un-protectable range.
2608  //
2609  // This means if the memory was allocated using shmget/shmat, protection wont work
2610  // but mprotect will still return 0:
2611  //
2612  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2613
2614  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2615
2616  if (!rc) {
2617    const char* const s_errno = strerror(errno);
2618    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2619    return false;
2620  }
2621
2622  // mprotect success check
2623  //
2624  // Mprotect said it changed the protection but can I believe it?
2625  //
2626  // To be sure I need to check the protection afterwards. Try to
2627  // read from protected memory and check whether that causes a segfault.
2628  //
2629  if (!os::Aix::xpg_sus_mode()) {
2630
2631    if (StubRoutines::SafeFetch32_stub()) {
2632
2633      const bool read_protected =
2634        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2635         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2636
2637      if (prot & PROT_READ) {
2638        rc = !read_protected;
2639      } else {
2640        rc = read_protected;
2641      }
2642    }
2643  }
2644  if (!rc) {
2645    assert(false, "mprotect failed.");
2646  }
2647  return rc;
2648}
2649
2650// Set protections specified
2651bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2652  unsigned int p = 0;
2653  switch (prot) {
2654  case MEM_PROT_NONE: p = PROT_NONE; break;
2655  case MEM_PROT_READ: p = PROT_READ; break;
2656  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2657  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2658  default:
2659    ShouldNotReachHere();
2660  }
2661  // is_committed is unused.
2662  return checked_mprotect(addr, size, p);
2663}
2664
// Revoke all access to the range (used for guard pages).
bool os::guard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_NONE);
}

// Restore full (read/write/execute) access to the range.
bool os::unguard_memory(char* addr, size_t size) {
  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
}
2672
// Large page support

// Size of the selected large page, set in os::large_page_init(); 0 if large
// pages are not in use.
static size_t _large_page_size = 0;
2676
2677// Enable large page support if OS allows that.
2678void os::large_page_init() {
2679
2680  // Note: os::Aix::query_multipage_support must run first.
2681
2682  if (!UseLargePages) {
2683    return;
2684  }
2685
2686  if (!Aix::can_use_64K_pages()) {
2687    assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
2688    UseLargePages = false;
2689    return;
2690  }
2691
2692  if (!Aix::can_use_16M_pages() && Use16MPages) {
2693    fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
2694            " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
2695  }
2696
2697  // Do not report 16M page alignment as part of os::_page_sizes if we are
2698  // explicitly forbidden from using 16M pages. Doing so would increase the
2699  // alignment the garbage collector calculates with, slightly increasing
2700  // heap usage. We should only pay for 16M alignment if we really want to
2701  // use 16M pages.
2702  if (Use16MPages && Aix::can_use_16M_pages()) {
2703    _large_page_size = SIZE_16M;
2704    _page_sizes[0] = SIZE_16M;
2705    _page_sizes[1] = SIZE_64K;
2706    _page_sizes[2] = SIZE_4K;
2707    _page_sizes[3] = 0;
2708  } else if (Aix::can_use_64K_pages()) {
2709    _large_page_size = SIZE_64K;
2710    _page_sizes[0] = SIZE_64K;
2711    _page_sizes[1] = SIZE_4K;
2712    _page_sizes[2] = 0;
2713  }
2714
2715  if (Verbose) {
2716    ("Default large page size is 0x%llX.", _large_page_size);
2717  }
2718} // end: os::large_page_init()
2719
// Explicit large-page reservation as a separate operation is not implemented
// on AIX (large pages are handled inside reserve_shmatted_memory).
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  // "exec" is passed in but not used. Creating the shared image for
  // the code cache doesn't have an SHM_X executable permission to check.
  Unimplemented();
  return 0;
}

bool os::release_memory_special(char* base, size_t bytes) {
  // detaching the SHM segment will also delete it, see reserve_memory_special()
  Unimplemented();
  return false;
}

// Page size chosen in os::large_page_init(), or 0 if large pages are off.
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Well, sadly we cannot commit anything at all (see comment in
  // os::commit_memory) but we claim to so we can make use of large pages
  return true;
}

bool os::can_execute_large_page_memory() {
  // We can do that
  return true;
}
2747
2748// Reserve memory at an arbitrary address, only if that area is
2749// available (and not reserved for something else).
2750char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2751
2752  bool use_mmap = false;
2753
2754  // mmap: smaller graining, no large page support
2755  // shm: large graining (256M), large page support, limited number of shm segments
2756  //
2757  // Prefer mmap wherever we either do not need large page support or have OS limits
2758
2759  if (!UseLargePages || bytes < SIZE_16M) {
2760    use_mmap = true;
2761  }
2762
2763  char* addr = NULL;
2764  if (use_mmap) {
2765    addr = reserve_mmaped_memory(bytes, requested_addr);
2766  } else {
2767    // shmat: wish address is mandatory, and do not try 16M pages here.
2768    shmatted_memory_info_t info;
2769    const int flags = RESSHM_WISHADDR_OR_FAIL;
2770    if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
2771      addr = info.addr;
2772    }
2773  }
2774
2775  return addr;
2776}
2777
size_t os::read(int fd, void *buf, unsigned int nBytes) {
  // Straight pass-through to the libc read(2).
  return ::read(fd, buf, nBytes);
}
2781
#define NANOSECS_PER_MILLISEC 1000000

// Sleep for 'millis' milliseconds on behalf of 'thread', which must be the
// calling thread. In interruptible mode the sleep can be cut short by
// os::interrupt() (returns OS_INTRPT); otherwise the full duration is slept
// and OS_OK is returned.
int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  assert(thread == Thread::current(), "thread consistency check");

  // Prevent nasty overflow in deadline calculation
  // by handling long sleeps similar to solaris or windows.
  const jlong limit = INT_MAX;
  int result;
  while (millis > limit) {
    if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {
      return result;
    }
    millis -= limit;
  }

  ParkEvent * const slp = thread->_SleepEvent;
  slp->reset();
  OrderAccess::fence();

  if (interruptible) {
    jlong prevtime = javaTimeNanos();

    // Prevent precision loss and too long sleeps
    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;

    for (;;) {
      if (os::is_interrupted(thread, true)) {
        return OS_INTRPT;
      }

      jlong newtime = javaTimeNanos();

      assert(newtime >= prevtime, "time moving backwards");
      // Doing prevtime and newtime in microseconds doesn't help precision,
      // and trying to round up to avoid lost milliseconds can result in a
      // too-short delay.
      millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;

      if (millis <= 0) {
        return OS_OK;
      }

      // Stop sleeping if we passed the deadline
      if (newtime >= deadline) {
        return OS_OK;
      }

      prevtime = newtime;

      {
        assert(thread->is_Java_thread(), "sanity check");
        JavaThread *jt = (JavaThread *) thread;
        ThreadBlockInVM tbivm(jt);
        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);

        jt->set_suspend_equivalent();

        slp->park(millis);

        // were we externally suspended while we were waiting?
        jt->check_and_wait_while_suspended();
      }
    }
  } else {
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jlong prevtime = javaTimeNanos();

    // Prevent precision loss and too long sleeps
    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;

    for (;;) {
      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
      // the 1st iteration ...
      jlong newtime = javaTimeNanos();

      if (newtime - prevtime < 0) {
        // time moving backwards, should only happen if no monotonic clock
        // not a guarantee() because JVM should not abort on kernel/glibc bugs
        // - HS14 Commented out as not implemented.
        // - TODO Maybe we should implement it?
        //assert(!Aix::supports_monotonic_clock(), "time moving backwards");
      } else {
        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
      }

      if (millis <= 0) break;

      if (newtime >= deadline) {
        break;
      }

      prevtime = newtime;
      slp->park(millis);
    }
    return OS_OK;
  }
}
2880
// Non-interruptible sleep of one millisecond on behalf of the current thread.
int os::naked_sleep() {
  // %% make the sleep time an integer flag. for now use 1 millisec.
  return os::sleep(Thread::current(), 1, false);
}
2885
2886// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2887void os::infinite_sleep() {
2888  while (true) {    // sleep forever ...
2889    ::sleep(100);   // ... 100 seconds at a time
2890  }
2891}
2892
// Used to convert frequent JVM_Yield() calls to nops.
// Returns true when yielding should be suppressed (-XX:+DontYieldALot).
bool os::dont_yield() {
  return DontYieldALot;
}
2897
void os::yield() {
  // Relinquish the CPU to other runnable threads.
  sched_yield();
}
2901
2902os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
2903
void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities.
  // NOTE(review): the rationale below was inherited from the Linux port -
  // a Solaris-style os::yield_all() with nanosleep(1ms) is not used here
  // either; a plain sched_yield() is considered sufficient.
  sched_yield();
}
2910
// Called from tight loops to possibly influence time-sharing heuristics;
// simply delegates to yield_all() ('attempts' is passed through unused there).
void os::loop_breaker(int attempts) {
  os::yield_all(attempts);
}
2915
2916////////////////////////////////////////////////////////////////////////////////
2917// thread priority support
2918
2919// From AIX manpage to pthread_setschedparam
2920// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2921//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2922//
2923// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2924// range from 40 to 80, where 40 is the least favored priority and 80
2925// is the most favored."
2926//
2927// (Actually, I doubt this even has an impact on AIX, as we do kernel
2928// scheduling there; however, this still leaves iSeries.)
2929//
2930// We use the same values for AIX and PASE.
// Maps Java thread priorities (0..CriticalPriority) onto SCHED_OTHER
// priorities inside the documented 40..80 range (see comment above).
int os::java_to_os_priority[CriticalPriority + 1] = {
  54,             // 0 Entry should never be used

  55,             // 1 MinPriority
  55,             // 2
  56,             // 3

  56,             // 4
  57,             // 5 NormPriority
  57,             // 6

  58,             // 7
  58,             // 8
  59,             // 9 NearMaxPriority

  60,             // 10 MaxPriority

  60              // 11 CriticalPriority
};
2950
2951OSReturn os::set_native_priority(Thread* thread, int newpri) {
2952  if (!UseThreadPriorities) return OS_OK;
2953  pthread_t thr = thread->osthread()->pthread_id();
2954  int policy = SCHED_OTHER;
2955  struct sched_param param;
2956  param.sched_priority = newpri;
2957  int ret = pthread_setschedparam(thr, policy, &param);
2958
2959  if (Verbose) {
2960    if (ret == 0) {
2961      fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
2962    } else {
2963      fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
2964              (int)thr, newpri, ret, strerror(ret));
2965    }
2966  }
2967  return (ret == 0) ? OS_OK : OS_ERR;
2968}
2969
2970OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2971  if (!UseThreadPriorities) {
2972    *priority_ptr = java_to_os_priority[NormPriority];
2973    return OS_OK;
2974  }
2975  pthread_t thr = thread->osthread()->pthread_id();
2976  int policy = SCHED_OTHER;
2977  struct sched_param param;
2978  int ret = pthread_getschedparam(thr, &policy, &param);
2979  *priority_ptr = param.sched_priority;
2980
2981  return (ret == 0) ? OS_OK : OS_ERR;
2982}
2983
2984// Hint to the underlying OS that a task switch would not be good.
2985// Void return because it's a hint and can fail.
2986void os::hint_no_preempt() {}
2987
2988////////////////////////////////////////////////////////////////////////////////
2989// suspend/resume support
2990
2991//  the low-level signal-based suspend/resume support is a remnant from the
2992//  old VM-suspension that used to be for java-suspension, safepoints etc,
2993//  within hotspot. Now there is a single use-case for this:
2994//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2995//      that runs in the watcher thread.
2996//  The remaining code is greatly simplified from the more general suspension
2997//  code that used to be used.
2998//
2999//  The protocol is quite simple:
3000//  - suspend:
3001//      - sends a signal to the target thread
3002//      - polls the suspend state of the osthread using a yield loop
3003//      - target thread signal handler (SR_handler) sets suspend state
3004//        and blocks in sigsuspend until continued
3005//  - resume:
3006//      - sets target osthread state to continue
3007//      - sends signal to end the sigsuspend loop in the SR_handler
3008//
3009//  Note that the SR_lock plays no role in this suspend/resume protocol.
3010//
3011
// Drop the ucontext/siginfo published by suspend_save_context() once the
// thread is resumed.
static void resume_clear_context(OSThread *osthread) {
  osthread->set_ucontext(NULL);
  osthread->set_siginfo(NULL);
}
3016
// Publish the signal context of a thread being suspended so that e.g.
// get_thread_pc() can inspect it while the thread is stopped.
static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  osthread->set_ucontext(context);
  osthread->set_siginfo(siginfo);
}
3021
3022//
3023// Handler function invoked when a thread's execution is suspended or
3024// resumed. We have to be careful that only async-safe functions are
3025// called here (Note: most pthread functions are not async safe and
3026// should be avoided.)
3027//
3028// Note: sigwait() is a more natural fit than sigsuspend() from an
// interface point of view, but sigwait() prevents the signal handler
// from being run. libpthread would get very confused by not having
// its signal handlers run and prevents sigwait()'s use with the
// mutex-granting signal.
3033//
3034// Currently only ever called on the VMThread and JavaThreads (PC sampling).
3035//
static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  // Async-signal-safe handler driving the suspend/resume protocol described
  // above: on a suspend request it publishes the thread's context and blocks
  // in sigsuspend() until resumed; wakeup requests and cancelled requests
  // fall through without blocking.

  // Save and restore errno to avoid confusing native code with EINTR
  // after sigsuspend.
  int old_errno = errno;

  Thread* thread = Thread::current();
  OSThread* osthread = thread->osthread();
  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");

  os::SuspendResume::State current = osthread->sr.state();
  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
    suspend_save_context(osthread, siginfo, context);

    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
    os::SuspendResume::State state = osthread->sr.suspended();
    if (state == os::SuspendResume::SR_SUSPENDED) {
      sigset_t suspend_set;  // signals for sigsuspend()

      // get current set of blocked signals and unblock resume signal
      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
      sigdelset(&suspend_set, SR_signum);

      // wait here until we are resumed
      while (1) {
        sigsuspend(&suspend_set);

        // sigsuspend also returns on other signals - only leave the loop
        // once do_resume() has moved us back to RUNNING.
        os::SuspendResume::State result = osthread->sr.running();
        if (result == os::SuspendResume::SR_RUNNING) {
          break;
        }
      }

    } else if (state == os::SuspendResume::SR_RUNNING) {
      // request was cancelled, continue
    } else {
      ShouldNotReachHere();
    }

    resume_clear_context(osthread);
  } else if (current == os::SuspendResume::SR_RUNNING) {
    // request was cancelled, continue
  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
    // ignore
  } else {
    ShouldNotReachHere();
  }

  errno = old_errno;
}
3085
3086
3087static int SR_initialize() {
3088  struct sigaction act;
3089  char *s;
3090  // Get signal number to use for suspend/resume
3091  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
3092    int sig = ::strtol(s, 0, 10);
3093    if (sig > 0 || sig < NSIG) {
3094      SR_signum = sig;
3095    }
3096  }
3097
3098  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
3099        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
3100
3101  sigemptyset(&SR_sigset);
3102  sigaddset(&SR_sigset, SR_signum);
3103
3104  // Set up signal handler for suspend/resume.
3105  act.sa_flags = SA_RESTART|SA_SIGINFO;
3106  act.sa_handler = (void (*)(int)) SR_handler;
3107
3108  // SR_signum is blocked by default.
3109  // 4528190 - We also need to block pthread restart signal (32 on all
3110  // supported Linux platforms). Note that LinuxThreads need to block
3111  // this signal for all threads to work properly. So we don't have
3112  // to use hard-coded signal number when setting up the mask.
3113  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
3114
3115  if (sigaction(SR_signum, &act, 0) == -1) {
3116    return -1;
3117  }
3118
3119  // Save signal flag
3120  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
3121  return 0;
3122}
3123
static int SR_finalize() {
  // Nothing to tear down; always succeeds.
  return 0;
}
3127
// Deliver the suspend/resume signal to the target thread; returns the
// pthread_kill() status (0 on success).
static int sr_notify(OSThread* osthread) {
  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  assert_status(status == 0, status, "pthread_kill");
  return status;
}
3133
// "Randomly" selected value for how long we want to spin
// before bailing out on suspending a thread, also how often
// we send a signal to a thread we want to resume
// (these bound the yield loops in do_suspend()/do_resume() below).
static const int RANDOMLY_LARGE_INTEGER = 1000000;
static const int RANDOMLY_LARGE_INTEGER2 = 100;
3139
3140// returns true on success and false on error - really an error is fatal
3141// but this seems the normal response to library errors
static bool do_suspend(OSThread* osthread) {
  // Drive the suspend half of the protocol: publish a SUSPEND_REQUEST,
  // signal the thread, then yield-spin until the SR_handler acknowledges
  // (SUSPENDED) or the request times out and is cancelled.
  assert(osthread->sr.is_running(), "thread should be running");
  // mark as suspended and send signal

  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
    // failed to switch, state wasn't running?
    ShouldNotReachHere();
    return false;
  }

  if (sr_notify(osthread) != 0) {
    // try to cancel, switch to running

    os::SuspendResume::State result = osthread->sr.cancel_suspend();
    if (result == os::SuspendResume::SR_RUNNING) {
      // cancelled
      return false;
    } else if (result == os::SuspendResume::SR_SUSPENDED) {
      // somehow managed to suspend
      return true;
    } else {
      ShouldNotReachHere();
      return false;
    }
  }

  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED

  for (int n = 0; !osthread->sr.is_suspended(); n++) {
    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
      os::yield_all(i);
    }

    // timeout, try to cancel the request
    if (n >= RANDOMLY_LARGE_INTEGER) {
      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
      if (cancelled == os::SuspendResume::SR_RUNNING) {
        return false;
      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
        return true;
      } else {
        ShouldNotReachHere();
        return false;
      }
    }
  }

  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  return true;
}
3192
static void do_resume(OSThread* osthread) {
  // Drive the resume half of the protocol: publish a WAKEUP_REQUEST and
  // keep signalling/yielding until the SR_handler reports RUNNING again.
  //assert(osthread->sr.is_suspended(), "thread should be suspended");

  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
    // failed to switch to WAKEUP_REQUEST
    ShouldNotReachHere();
    return;
  }

  while (!osthread->sr.is_running()) {
    if (sr_notify(osthread) == 0) {
      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
          os::yield_all(i);
        }
      }
    } else {
      ShouldNotReachHere();
    }
  }

  guarantee(osthread->sr.is_running(), "Must be running!");
}
3216
3217////////////////////////////////////////////////////////////////////////////////
3218// interrupt support
3219
void os::interrupt(Thread* thread) {
  // Set the interrupt flag of 'thread' and unpark every event the thread
  // could be blocked on (_SleepEvent, the JSR166 parker, _ParkEvent).
  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
    "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();

  if (!osthread->interrupted()) {
    osthread->set_interrupted(true);
    // More than one thread can get here with the same value of osthread,
    // resulting in multiple notifications.  We do, however, want the store
    // to interrupted() to be visible to other threads before we execute unpark().
    OrderAccess::fence();
    ParkEvent * const slp = thread->_SleepEvent;
    if (slp != NULL) slp->unpark();
  }

  // For JSR166. Unpark even if interrupt status already was set
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();

}
3244
3245bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3246  assert(Thread::current() == thread || Threads_lock->owned_by_self(),
3247    "possibility of dangling Thread pointer");
3248
3249  OSThread* osthread = thread->osthread();
3250
3251  bool interrupted = osthread->interrupted();
3252
3253  if (interrupted && clear_interrupted) {
3254    osthread->set_interrupted(false);
3255    // consider thread->_SleepEvent->reset() ... optional optimization
3256  }
3257
3258  return interrupted;
3259}
3260
3261///////////////////////////////////////////////////////////////////////////////////
3262// signal handling (except suspend/resume)
3263
3264// This routine may be used by user applications as a "hook" to catch signals.
3265// The user-defined signal handler must pass unrecognized signals to this
3266// routine, and if it returns true (non-zero), then the signal handler must
3267// return immediately. If the flag "abort_if_unrecognized" is true, then this
// routine will never return false (zero), but instead will execute a VM panic
// routine to kill the process.
3270//
3271// If this routine returns false, it is OK to call it again. This allows
3272// the user-defined signal handler to perform checks either before or after
3273// the VM performs its own checks. Naturally, the user code would be making
3274// a serious error if it tried to handle an exception (such as a null check
3275// or breakpoint) that the VM was generating for its own correct operation.
3276//
3277// This routine may recognize any of the following kinds of signals:
3278//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
3279// It should be consulted by handlers for any of those signals.
3280//
3281// The caller of this routine must pass in the three arguments supplied
3282// to the function referred to in the "sa_sigaction" (not the "sa_handler")
3283// field of the structure passed to sigaction(). This routine assumes that
3284// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3285//
3286// Note that the VM will print warnings if it detects conflicting signal
3287// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3288//
3289extern "C" JNIEXPORT int
3290JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
3291
// Set thread signal mask (for some reason on AIX sigthreadmask() seems
// to be the thing to call; documentation is not terribly clear about whether
// pthread_sigmask also works, and if it does, whether it does the same).
bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  // Thin wrapper around pthread_sigmask() returning a bool instead of an
  // error number. Note the error-reporting difference to sigthreadmask():
  // pthread_sigmask returns the error number directly, while sigthreadmask
  // returns -1 and sets the global errno - which makes pthread_sigmask the
  // more thread-safe choice for error handling. Success is always 0.
  return ::pthread_sigmask(how, set, oset) == 0;
}
3303
3304// Function to unblock all signals which are, according
3305// to POSIX, typical program error signals. If they happen while being blocked,
3306// they typically will bring down the process immediately.
3307bool unblock_program_error_signals() {
3308  sigset_t set;
3309  ::sigemptyset(&set);
3310  ::sigaddset(&set, SIGILL);
3311  ::sigaddset(&set, SIGBUS);
3312  ::sigaddset(&set, SIGFPE);
3313  ::sigaddset(&set, SIGSEGV);
3314  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3315}
3316
3317// Renamed from 'signalHandler' to avoid collision with other shared libs.
// Central VM signal handler: unblocks the program error signals, then
// forwards everything to JVM_handle_aix_signal() with
// abort_if_unrecognized == true.
void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  assert(info != NULL && uc != NULL, "it must be old kernel");

  // Never leave program error signals blocked;
  // on all our platforms they would bring down the process immediately when
  // getting raised while being blocked.
  unblock_program_error_signals();

  JVM_handle_aix_signal(sig, info, uc, true);
}
3328
3329
// This boolean allows users to forward their own non-matching signals
// to JVM_handle_aix_signal, harmlessly.
bool os::Aix::signal_handlers_are_installed = false;

// For signal-chaining:
// Handlers that were installed before the VM took over a signal; bit i of
// 'sigs' is set iff sigact[i] holds a saved handler (see
// save_preinstalled_handler()/get_preinstalled_handler()).
struct sigaction os::Aix::sigact[MAXSIGNUM];
unsigned int os::Aix::sigs = 0;
// Whether libjsig interposition is active and, if so, the entry point used
// to query it for the previously installed handler of a given signal.
bool os::Aix::libjsig_is_loaded = false;
typedef struct sigaction *(*get_signal_t)(int);
get_signal_t os::Aix::get_signal_action = NULL;
3340
3341struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3342  struct sigaction *actp = NULL;
3343
3344  if (libjsig_is_loaded) {
3345    // Retrieve the old signal handler from libjsig
3346    actp = (*get_signal_action)(sig);
3347  }
3348  if (actp == NULL) {
3349    // Retrieve the preinstalled signal handler from jvm
3350    actp = get_preinstalled_handler(sig);
3351  }
3352
3353  return actp;
3354}
3355
// Would invoke the chained handler 'actp' for 'sig'; not implemented on AIX
// yet. Claims the signal was handled by returning true.
static bool call_chained_handler(struct sigaction *actp, int sig,
                                 siginfo_t *siginfo, void *context) {
  Unimplemented();
  return true;
}
3361
3362bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3363  bool chained = false;
3364  // signal-chaining
3365  if (UseSignalChaining) {
3366    struct sigaction *actp = get_chained_signal_action(sig);
3367    if (actp != NULL) {
3368      chained = call_chained_handler(actp, sig, siginfo, context);
3369    }
3370  }
3371  return chained;
3372}
3373
3374struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3375  if ((((unsigned int)1 << sig) & sigs) != 0) {
3376    return &sigact[sig];
3377  }
3378  return NULL;
3379}
3380
// Remember a pre-existing handler for 'sig' so chained_handler() can forward
// to it later; marks the slot as valid in the 'sigs' bit mask.
void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigact[sig] = oldAct;
  sigs |= (unsigned int)1 << sig;
}
3386
// for diagnostic: per-signal sa_flags installed by the VM, consulted by
// print_signal_handler() to detect foreign modifications.
int os::Aix::sigflags[MAXSIGNUM];
3389
// Returns the sa_flags the VM recorded for 'sig' (see set_our_sigflags()).
int os::Aix::get_our_sigflags(int sig) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  return sigflags[sig];
}
3394
// Records the sa_flags the VM used when installing its handler for 'sig'.
void os::Aix::set_our_sigflags(int sig, int flags) {
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = flags;
}
3399
void os::Aix::set_signal_handler(int sig, bool set_installed) {
  // Install the VM's handler for 'sig' (set_installed == true) or restore
  // the default disposition (false), honoring AllowUserSignalHandlers and
  // signal chaining when a foreign handler is already present.
  // Check for overwrite.
  struct sigaction oldAct;
  sigaction(sig, (struct sigaction*)NULL, &oldAct);

  void* oldhand = oldAct.sa_sigaction
    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  // Renamed 'signalHandler' to avoid collision with other shared libs.
  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
    if (AllowUserSignalHandlers || !set_installed) {
      // Do not overwrite; user takes responsibility to forward to us.
      return;
    } else if (UseSignalChaining) {
      // save the old handler in jvm
      save_preinstalled_handler(sig, oldAct);
      // libjsig also interposes the sigaction() call below and saves the
      // old sigaction on it own.
    } else {
      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
                    "%#lx for signal %d.", (long)oldhand, sig));
    }
  }

  struct sigaction sigAct;
  sigfillset(&(sigAct.sa_mask));
  if (!set_installed) {
    sigAct.sa_handler = SIG_DFL;
    sigAct.sa_flags = SA_RESTART;
  } else {
    // Renamed 'signalHandler' to avoid collision with other shared libs.
    sigAct.sa_sigaction = javaSignalHandler;
    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  }
  // Save flags, which are set by ours
  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  sigflags[sig] = sigAct.sa_flags;

  int ret = sigaction(sig, &sigAct, &oldAct);
  assert(ret == 0, "check");

  // Re-read the previous handler: if it changed between the two sigaction()
  // calls, someone else was installing handlers concurrently.
  void* oldhand2 = oldAct.sa_sigaction
                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
}
3448
3449// install signal handlers for signals that HotSpot needs to
3450// handle in order to support Java-level exception handling.
void os::Aix::install_signal_handlers() {
  // Install the VM's handlers for all signals HotSpot needs (idempotent),
  // bracketing the installation with libjsig notifications when libjsig's
  // chaining entry points can be resolved.
  if (!signal_handlers_are_installed) {
    signal_handlers_are_installed = true;

    // signal-chaining
    typedef void (*signal_setting_t)();
    signal_setting_t begin_signal_setting = NULL;
    signal_setting_t end_signal_setting = NULL;
    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
    if (begin_signal_setting != NULL) {
      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
      libjsig_is_loaded = true;
      assert(UseSignalChaining, "should enable signal-chaining");
    }
    if (libjsig_is_loaded) {
      // Tell libjsig jvm is setting signal handlers
      (*begin_signal_setting)();
    }

    set_signal_handler(SIGSEGV, true);
    set_signal_handler(SIGPIPE, true);
    set_signal_handler(SIGBUS, true);
    set_signal_handler(SIGILL, true);
    set_signal_handler(SIGFPE, true);
    set_signal_handler(SIGTRAP, true);
    set_signal_handler(SIGXFSZ, true);
    set_signal_handler(SIGDANGER, true);

    if (libjsig_is_loaded) {
      // Tell libjsig jvm finishes setting signal handlers
      (*end_signal_setting)();
    }

    // We don't activate signal checker if libjsig is in place, we trust ourselves
    // and if UserSignalHandler is installed all bets are off.
    // Log that signal checking is off only if -verbose:jni is specified.
    if (CheckJNICalls) {
      if (libjsig_is_loaded) {
        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
        check_signals = false;
      }
      if (AllowUserSignalHandlers) {
        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
        check_signals = false;
      }
      // need to initialize check_signal_done
      ::sigemptyset(&check_signal_done);
    }
  }
}
3505
3506static const char* get_signal_handler_name(address handler,
3507                                           char* buf, int buflen) {
3508  int offset;
3509  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3510  if (found) {
3511    // skip directory names
3512    const char *p1, *p2;
3513    p1 = buf;
3514    size_t len = strlen(os::file_separator());
3515    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3516    // The way os::dll_address_to_library_name is implemented on Aix
3517    // right now, it always returns -1 for the offset which is not
3518    // terribly informative.
3519    // Will fix that. For now, omit the offset.
3520    jio_snprintf(buf, buflen, "%s", p1);
3521  } else {
3522    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3523  }
3524  return buf;
3525}
3526
static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen) {
  // Print the currently installed handler, blocked-signal mask and sa_flags
  // for 'sig' to 'st', warning when the flags the VM installed were altered.
  struct sigaction sa;
  sigaction(sig, NULL, &sa);

  st->print("%s: ", os::exception_name(sig, buf, buflen));

  address handler = (sa.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
    : CAST_FROM_FN_PTR(address, sa.sa_handler);

  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
    st->print("SIG_DFL");
  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
    st->print("SIG_IGN");
  } else {
    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  }

  // Print readable mask.
  st->print(", sa_mask[0]=");
  os::Posix::print_signal_set_short(st, &sa.sa_mask);

  address rh = VMError::get_resetted_sighandler(sig);
  // Maybe the handler was reset by VMError?
  if (rh != NULL) {
    handler = rh;
    sa.sa_flags = VMError::get_resetted_sigflags(sig);
  }

  // Print textual representation of sa_flags.
  st->print(", sa_flags=");
  os::Posix::print_sa_flags(st, sa.sa_flags);

  // Check: is it our handler?
  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
    // It is our signal handler.
    // Check for flags, reset system-used one!
    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
                os::Aix::get_our_sigflags(sig));
    }
  }
  st->cr();
}
3573
3574
// Helper for run_periodic_checks(): verify the handler for 'sig' once,
// skipping signals already marked done in check_signal_done.
#define DO_SIGNAL_CHECK(sig) \
  if (!sigismember(&check_signal_done, sig)) \
    os::Aix::check_signal_handler(sig)
3578
3579// This method is a periodic task to check for misbehaving JNI applications
3580// under CheckJNI, we can add any periodic checks here
3581
void os::run_periodic_checks() {
  // Periodic CheckJNI task: warn if any signal handler the VM relies on has
  // been replaced behind its back (see check_signal_handler()). Disabled
  // once check_signals is cleared (e.g. when libjsig is active).
  if (check_signals == false) return;

  // SEGV and BUS if overridden could potentially prevent
  // generation of hs*.log in the event of a crash, debugging
  // such a case can be very challenging, so we absolutely
  // check the following for a good measure:
  DO_SIGNAL_CHECK(SIGSEGV);
  DO_SIGNAL_CHECK(SIGILL);
  DO_SIGNAL_CHECK(SIGFPE);
  DO_SIGNAL_CHECK(SIGBUS);
  DO_SIGNAL_CHECK(SIGPIPE);
  DO_SIGNAL_CHECK(SIGXFSZ);
  if (UseSIGTRAP) {
    DO_SIGNAL_CHECK(SIGTRAP);
  }
  DO_SIGNAL_CHECK(SIGDANGER);

  // ReduceSignalUsage allows the user to override these handlers
  // see comments at the very top and jvm_solaris.h
  if (!ReduceSignalUsage) {
    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
    DO_SIGNAL_CHECK(BREAK_SIGNAL);
  }

  DO_SIGNAL_CHECK(SR_signum);
  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
}
3613
3614typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3615
3616static os_sigaction_t os_sigaction = NULL;
3617
// Compares the handler currently installed for 'sig' against the handler
// the VM expects, and warns on tty if the handler or its sa_flags were
// changed. Once a mismatch is reported the signal is added to
// check_signal_done so it is only reported once.
void os::Aix::check_signal_handler(int sig) {
  char buf[O_BUFLEN];
  address jvmHandler = NULL;

  struct sigaction act;
  if (os_sigaction == NULL) {
    // only trust the default sigaction, in case it has been interposed
    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
    if (os_sigaction == NULL) return;
  }

  // NOTE(review): the return value is not checked; if this call ever fails,
  // 'act' is read uninitialized below - confirm failure cannot happen here.
  os_sigaction(sig, (struct sigaction*)NULL, &act);

  // The installed handler lives in sa_sigaction or sa_handler depending on
  // whether SA_SIGINFO is set.
  address thisHandler = (act.sa_flags & SA_SIGINFO)
    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
    : CAST_FROM_FN_PTR(address, act.sa_handler);


  // Determine which handler the VM installed for this signal.
  switch(sig) {
  case SIGSEGV:
  case SIGBUS:
  case SIGFPE:
  case SIGPIPE:
  case SIGILL:
  case SIGXFSZ:
    // Renamed 'signalHandler' to avoid collision with other shared libs.
    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
    break;

  case SHUTDOWN1_SIGNAL:
  case SHUTDOWN2_SIGNAL:
  case SHUTDOWN3_SIGNAL:
  case BREAK_SIGNAL:
    jvmHandler = (address)user_handler();
    break;

  case INTERRUPT_SIGNAL:
    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
    break;

  default:
    if (sig == SR_signum) {
      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
    } else {
      // Not a signal the VM manages - nothing to verify.
      return;
    }
    break;
  }

  if (thisHandler != jvmHandler) {
    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
    // No need to check this sig any longer
    sigaddset(&check_signal_done, sig);
  }

  // Dump all the signal
  if (sigismember(&check_signal_done, sig)) {
    print_signal_handlers(tty, buf, O_BUFLEN);
  }
}
3686
3687extern bool signal_name(int signo, char* buf, size_t len);
3688
3689const char* os::exception_name(int exception_code, char* buf, size_t size) {
3690  if (0 < exception_code && exception_code <= SIGRTMAX) {
3691    // signal
3692    if (!signal_name(exception_code, buf, size)) {
3693      jio_snprintf(buf, size, "SIG%d", exception_code);
3694    }
3695    return buf;
3696  } else {
3697    return NULL;
3698  }
3699}
3700
3701// To install functions for atexit system call
extern "C" {
  // atexit hook with C linkage that forwards to perfMemory_exit().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
3707
3708// This is called _before_ the most of global arguments have been parsed.
void os::init(void) {
  // This is basic, we want to know if that ever changes.
  // (shared memory boundary is supposed to be a 256M aligned)
  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");

  // First off, we need to know whether we run on AIX or PASE, and
  // the OS level we run on.
  os::Aix::initialize_os_info();

  // Scan environment (SPEC1170 behaviour, etc)
  os::Aix::scan_environment();

  // Check which pages are supported by AIX.
  os::Aix::query_multipage_support();

  // Next, we need to initialize libo4 and libperfstat libraries.
  if (os::Aix::on_pase()) {
    os::Aix::initialize_libo4();
  } else {
    os::Aix::initialize_libperfstat();
  }

  // Reset the perfstat information provided by ODM.
  if (os::Aix::on_aix()) {
    libperfstat::perfstat_reset();
  }

  // Now initialize basic system properties. Note that for some of the values we
  // need libperfstat etc.
  os::Aix::initialize_system_info();

  // Initialize large page support.
  if (UseLargePages) {
    os::large_page_init();
    if (!UseLargePages) {
      // large_page_init() cleared UseLargePages again, i.e. large page
      // initialization failed; fall back to the default page size.
      // initialize os::_page_sizes
      _page_sizes[0] = Aix::page_size();
      _page_sizes[1] = 0; // list is zero-terminated
      if (Verbose) {
        fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
      }
    }
  } else {
    // initialize os::_page_sizes
    _page_sizes[0] = Aix::page_size();
    _page_sizes[1] = 0; // list is zero-terminated
  }

  // debug trace
  if (Verbose) {
    fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
    fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
    fprintf(stderr, "os::_page_sizes = ( ");
    for (int i = 0; _page_sizes[i]; i ++) {
      fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
    }
    fprintf(stderr, ")\n");
  }

  _initial_pid = getpid();

  clock_tics_per_sec = sysconf(_SC_CLK_TCK);

  init_random(1234567);

  ThreadCritical::initialize();

  // Main_thread points to the aboriginal thread.
  Aix::_main_thread = pthread_self();

  initial_time_count = os::elapsed_counter();
  pthread_mutex_init(&dl_mutex, NULL);
}
3782
3783// this is called _after_ the global arguments have been parsed
3784jint os::init_2(void) {
3785
3786  if (Verbose) {
3787    fprintf(stderr, "processor count: %d\n", os::_processor_count);
3788    fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
3789  }
3790
3791  // initially build up the loaded dll map
3792  LoadedLibraries::reload();
3793
3794  const int page_size = Aix::page_size();
3795  const int map_size = page_size;
3796
3797  address map_address = (address) MAP_FAILED;
3798  const int prot  = PROT_READ;
3799  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3800
3801  // use optimized addresses for the polling page,
3802  // e.g. map it to a special 32-bit address.
3803  if (OptimizePollingPageLocation) {
3804    // architecture-specific list of address wishes:
3805    address address_wishes[] = {
3806      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3807      // PPC64: all address wishes are non-negative 32 bit values where
3808      // the lower 16 bits are all zero. we can load these addresses
3809      // with a single ppc_lis instruction.
3810      (address) 0x30000000, (address) 0x31000000,
3811      (address) 0x32000000, (address) 0x33000000,
3812      (address) 0x40000000, (address) 0x41000000,
3813      (address) 0x42000000, (address) 0x43000000,
3814      (address) 0x50000000, (address) 0x51000000,
3815      (address) 0x52000000, (address) 0x53000000,
3816      (address) 0x60000000, (address) 0x61000000,
3817      (address) 0x62000000, (address) 0x63000000
3818    };
3819    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3820
3821    // iterate over the list of address wishes:
3822    for (int i=0; i<address_wishes_length; i++) {
3823      // try to map with current address wish.
3824      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3825      // fail if the address is already mapped.
3826      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3827                                     map_size, prot,
3828                                     flags | MAP_FIXED,
3829                                     -1, 0);
3830      if (Verbose) {
3831        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3832                address_wishes[i], map_address + (ssize_t)page_size);
3833      }
3834
3835      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3836        // map succeeded and map_address is at wished address, exit loop.
3837        break;
3838      }
3839
3840      if (map_address != (address) MAP_FAILED) {
3841        // map succeeded, but polling_page is not at wished address, unmap and continue.
3842        ::munmap(map_address, map_size);
3843        map_address = (address) MAP_FAILED;
3844      }
3845      // map failed, continue loop.
3846    }
3847  } // end OptimizePollingPageLocation
3848
3849  if (map_address == (address) MAP_FAILED) {
3850    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3851  }
3852  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3853  os::set_polling_page(map_address);
3854
3855  if (!UseMembar) {
3856    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3857    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3858    os::set_memory_serialize_page(mem_serialize_page);
3859
3860#ifndef PRODUCT
3861    if (Verbose && PrintMiscellaneous)
3862      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3863#endif
3864  }
3865
3866  // initialize suspend/resume support - must do this before signal_sets_init()
3867  if (SR_initialize() != 0) {
3868    perror("SR_initialize failed");
3869    return JNI_ERR;
3870  }
3871
3872  Aix::signal_sets_init();
3873  Aix::install_signal_handlers();
3874
3875  // Check minimum allowable stack size for thread creation and to initialize
3876  // the java system classes, including StackOverflowError - depends on page
3877  // size. Add a page for compiler2 recursion in main thread.
3878  // Add in 2*BytesPerWord times page size to account for VM stack during
3879  // class initialization depending on 32 or 64 bit VM.
3880  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3881            (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
3882                     2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
3883
3884  size_t threadStackSizeInBytes = ThreadStackSize * K;
3885  if (threadStackSizeInBytes != 0 &&
3886      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3887        tty->print_cr("\nThe stack size specified is too small, "
3888                      "Specify at least %dk",
3889                      os::Aix::min_stack_allowed / K);
3890        return JNI_ERR;
3891  }
3892
3893  // Make the stack size a multiple of the page size so that
3894  // the yellow/red zones can be guarded.
3895  // note that this can be 0, if no default stacksize was set
3896  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3897
3898  Aix::libpthread_init();
3899
3900  if (MaxFDLimit) {
3901    // set the number of file descriptors to max. print out error
3902    // if getrlimit/setrlimit fails but continue regardless.
3903    struct rlimit nbr_files;
3904    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3905    if (status != 0) {
3906      if (PrintMiscellaneous && (Verbose || WizardMode))
3907        perror("os::init_2 getrlimit failed");
3908    } else {
3909      nbr_files.rlim_cur = nbr_files.rlim_max;
3910      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3911      if (status != 0) {
3912        if (PrintMiscellaneous && (Verbose || WizardMode))
3913          perror("os::init_2 setrlimit failed");
3914      }
3915    }
3916  }
3917
3918  if (PerfAllowAtExitRegistration) {
3919    // only register atexit functions if PerfAllowAtExitRegistration is set.
3920    // atexit functions can be delayed until process exit time, which
3921    // can be problematic for embedded VM situations. Embedded VMs should
3922    // call DestroyJavaVM() to assure that VM resources are released.
3923
3924    // note: perfMemory_exit_helper atexit function may be removed in
3925    // the future if the appropriate cleanup code can be added to the
3926    // VM_Exit VMOperation's doit method.
3927    if (atexit(perfMemory_exit_helper) != 0) {
3928      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3929    }
3930  }
3931
3932  return JNI_OK;
3933}
3934
3935// this is called at the end of vm_initialization
3936void os::init_3(void) {
3937  return;
3938}
3939
3940// Mark the polling page as unreadable
3941void os::make_polling_page_unreadable(void) {
3942  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3943    fatal("Could not disable polling page");
3944  }
3945};
3946
3947// Mark the polling page as readable
3948void os::make_polling_page_readable(void) {
3949  // Changed according to os_linux.cpp.
3950  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3951    fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3952  }
3953};
3954
3955int os::active_processor_count() {
3956  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3957  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3958  return online_cpus;
3959}
3960
3961void os::set_native_thread_name(const char *name) {
3962  // Not yet implemented.
3963  return;
3964}
3965
bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented on AIX; always reports failure.
  return false;
}
3970
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented on AIX; always reports failure.
  return false;
}
3975
void os::SuspendedThreadTask::internal_do_task() {
  // Suspend the target thread; if that succeeds, run do_task() against the
  // suspended thread's context, then resume it. If suspension fails the
  // task is silently skipped.
  if (do_suspend(_thread->osthread())) {
    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
    do_task(context);
    do_resume(_thread->osthread());
  }
}
3983
// A SuspendedThreadTask that captures the program counter of the suspended
// target thread. Used by os::get_thread_pc().
class PcFetcher : public os::SuspendedThreadTask {
public:
  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  // The captured PC; only valid once the task has completed (is_done()).
  ExtendedPC result();
protected:
  // Invoked while the target thread is suspended.
  void do_task(const os::SuspendedThreadTaskContext& context);
private:
  ExtendedPC _epc; // PC read from the suspended thread's ucontext.
};
3993
ExtendedPC PcFetcher::result() {
  // The PC is only meaningful after the suspend/fetch/resume cycle ran.
  guarantee(is_done(), "task is not done yet.");
  return _epc;
}
3998
void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  Thread* thread = context.thread();
  OSThread* osthread = thread->osthread();
  // NOTE(review): the guard tests osthread->ucontext() but the PC is read
  // from context.ucontext() - presumably both refer to the same context
  // (see internal_do_task, which builds the context from the osthread);
  // confirm before relying on it.
  if (osthread->ucontext() != NULL) {
    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
  } else {
    // NULL context is unexpected, double-check this is the VMThread.
    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  }
}
4009
4010// Suspends the target using the signal mechanism and then grabs the PC before
4011// resuming the target. Used by the flat-profiler only
ExtendedPC os::get_thread_pc(Thread* thread) {
  // Make sure that it is called by the watcher for the VMThread.
  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  assert(thread->is_VM_thread(), "Can only be called for VMThread");

  // Suspend the thread, capture its PC (PcFetcher::do_task), resume it.
  PcFetcher fetcher(thread);
  fetcher.run();
  return fetcher.result();
}
4021
// Not needed on AIX.
4023// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
4024// }
4025
4026////////////////////////////////////////////////////////////////////////////////
4027// debug support
4028
4029static address same_page(address x, address y) {
4030  intptr_t page_bits = -os::vm_page_size();
4031  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
4032    return x;
4033  else if (x > y)
4034    return (address)(intptr_t(y) | ~page_bits) + 1;
4035  else
4036    return (address)(intptr_t(y) & page_bits);
4037}
4038
bool os::find(address addr, outputStream* st) {
  // Address-to-symbol lookup is not implemented on AIX.
  Unimplemented();
  return false;
}
4043
4044////////////////////////////////////////////////////////////////////////////////
4045// misc
4046
4047// This does not do anything on Aix. This is basically a hook for being
4048// able to use structured exception handling (thread-local exception filters)
4049// on, e.g., Win32.
void
os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
                         JavaCallArguments* args, Thread* thread) {
  // No structured exception handling on AIX - just forward the call.
  f(value, method, args, thread);
}
4055
void os::print_statistics() {
  // No AIX-specific statistics are collected.
}
4058
4059int os::message_box(const char* title, const char* message) {
4060  int i;
4061  fdStream err(defaultStream::error_fd());
4062  for (i = 0; i < 78; i++) err.print_raw("=");
4063  err.cr();
4064  err.print_raw_cr(title);
4065  for (i = 0; i < 78; i++) err.print_raw("-");
4066  err.cr();
4067  err.print_raw_cr(message);
4068  for (i = 0; i < 78; i++) err.print_raw("=");
4069  err.cr();
4070
4071  char buf[16];
4072  // Prevent process from exiting upon "read error" without consuming all CPU
4073  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4074
4075  return buf[0] == 'y' || buf[0] == 'Y';
4076}
4077
4078int os::stat(const char *path, struct stat *sbuf) {
4079  char pathbuf[MAX_PATH];
4080  if (strlen(path) > MAX_PATH - 1) {
4081    errno = ENAMETOOLONG;
4082    return -1;
4083  }
4084  os::native_path(strcpy(pathbuf, path));
4085  return ::stat(pathbuf, sbuf);
4086}
4087
bool os::check_heap(bool force) {
  // No C-heap verification on AIX; always reports success.
  return true;
}
4091
4092// int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
4093//   return ::vsnprintf(buf, count, format, args);
4094// }
4095
4096// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  // Not implemented on AIX.
  Unimplemented();
  return false;
}
4101
4102// This code originates from JDK's sysOpen and open64_w
4103// from src/solaris/hpi/src/system_md.c
4104
4105#ifndef O_DELETE
4106#define O_DELETE 0x10000
4107#endif
4108
4109// Open a file. Unlink the file immediately after open returns
4110// if the specified oflag has the O_DELETE flag set.
4111// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
4112
4113int os::open(const char *path, int oflag, int mode) {
4114
4115  if (strlen(path) > MAX_PATH - 1) {
4116    errno = ENAMETOOLONG;
4117    return -1;
4118  }
4119  int fd;
4120  int o_delete = (oflag & O_DELETE);
4121  oflag = oflag & ~O_DELETE;
4122
4123  fd = ::open64(path, oflag, mode);
4124  if (fd == -1) return -1;
4125
4126  //If the open succeeded, the file might still be a directory
4127  {
4128    struct stat64 buf64;
4129    int ret = ::fstat64(fd, &buf64);
4130    int st_mode = buf64.st_mode;
4131
4132    if (ret != -1) {
4133      if ((st_mode & S_IFMT) == S_IFDIR) {
4134        errno = EISDIR;
4135        ::close(fd);
4136        return -1;
4137      }
4138    } else {
4139      ::close(fd);
4140      return -1;
4141    }
4142  }
4143
4144  // All file descriptors that are opened in the JVM and not
4145  // specifically destined for a subprocess should have the
4146  // close-on-exec flag set. If we don't set it, then careless 3rd
4147  // party native code might fork and exec without closing all
4148  // appropriate file descriptors (e.g. as we do in closeDescriptors in
4149  // UNIXProcess.c), and this in turn might:
4150  //
4151  // - cause end-of-file to fail to be detected on some file
4152  //   descriptors, resulting in mysterious hangs, or
4153  //
4154  // - might cause an fopen in the subprocess to fail on a system
4155  //   suffering from bug 1085341.
4156  //
4157  // (Yes, the default setting of the close-on-exec flag is a Unix
4158  // design flaw.)
4159  //
4160  // See:
4161  // 1085341: 32-bit stdio routines should support file descriptors >255
4162  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4163  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4164#ifdef FD_CLOEXEC
4165  {
4166    int flags = ::fcntl(fd, F_GETFD);
4167    if (flags != -1)
4168      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4169  }
4170#endif
4171
4172  if (o_delete != 0) {
4173    ::unlink(path);
4174  }
4175  return fd;
4176}
4177
4178
4179// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  // Not implemented on AIX.
  Unimplemented();
  return 0;
}
4184
4185// return current position of file pointer
4186jlong os::current_file_offset(int fd) {
4187  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4188}
4189
4190// move file pointer to the specified offset
4191jlong os::seek_to_file_offset(int fd, jlong offset) {
4192  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4193}
4194
4195// This code originates from JDK's sysAvailable
4196// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
4197
4198int os::available(int fd, jlong *bytes) {
4199  jlong cur, end;
4200  int mode;
4201  struct stat64 buf64;
4202
4203  if (::fstat64(fd, &buf64) >= 0) {
4204    mode = buf64.st_mode;
4205    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4206      // XXX: is the following call interruptible? If so, this might
4207      // need to go through the INTERRUPT_IO() wrapper as for other
4208      // blocking, interruptible calls in this file.
4209      int n;
4210      if (::ioctl(fd, FIONREAD, &n) >= 0) {
4211        *bytes = n;
4212        return 1;
4213      }
4214    }
4215  }
4216  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4217    return 0;
4218  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4219    return 0;
4220  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4221    return 0;
4222  }
4223  *bytes = end - cur;
4224  return 1;
4225}
4226
4227int os::socket_available(int fd, jint *pbytes) {
4228  // Linux doc says EINTR not returned, unlike Solaris
4229  int ret = ::ioctl(fd, FIONREAD, pbytes);
4230
4231  //%% note ioctl can return 0 when successful, JVM_SocketAvailable
4232  // is expected to return 0 on failure and 1 on success to the jdk.
4233  return (ret < 0) ? 0 : 1;
4234}
4235
4236// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  // File mapping is not implemented on AIX.
  Unimplemented();
  return NULL;
}
4243
4244
4245// Remap a block of memory.
4246char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4247                          char *addr, size_t bytes, bool read_only,
4248                          bool allow_exec) {
4249  // same as map_memory() on this OS
4250  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4251                        allow_exec);
4252}
4253
4254// Unmap a block of memory.
4255bool os::pd_unmap_memory(char* addr, size_t bytes) {
4256  return munmap(addr, bytes) == 0;
4257}
4258
4259// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4260// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4261// of a thread.
4262//
4263// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4264// the fast estimate available on the platform.
4265
4266jlong os::current_thread_cpu_time() {
4267  // return user + sys since the cost is the same
4268  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4269  assert(n >= 0, "negative CPU time");
4270  return n;
4271}
4272
4273jlong os::thread_cpu_time(Thread* thread) {
4274  // consistent with what current_thread_cpu_time() returns
4275  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4276  assert(n >= 0, "negative CPU time");
4277  return n;
4278}
4279
4280jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4281  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4282  assert(n >= 0, "negative CPU time");
4283  return n;
4284}
4285
4286static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4287  bool error = false;
4288
4289  jlong sys_time = 0;
4290  jlong user_time = 0;
4291
4292  // reimplemented using getthrds64().
4293  //
4294  // goes like this:
4295  // For the thread in question, get the kernel thread id. Then get the
4296  // kernel thread statistics using that id.
4297  //
4298  // This only works of course when no pthread scheduling is used,
4299  // ie there is a 1:1 relationship to kernel threads.
4300  // On AIX, see AIXTHREAD_SCOPE variable.
4301
4302  pthread_t pthtid = thread->osthread()->pthread_id();
4303
4304  // retrieve kernel thread id for the pthread:
4305  tid64_t tid = 0;
4306  struct __pthrdsinfo pinfo;
4307  // I just love those otherworldly IBM APIs which force me to hand down
4308  // dummy buffers for stuff I dont care for...
4309  char dummy[1];
4310  int dummy_size = sizeof(dummy);
4311  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4312                          dummy, &dummy_size) == 0) {
4313    tid = pinfo.__pi_tid;
4314  } else {
4315    tty->print_cr("pthread_getthrds_np failed.");
4316    error = true;
4317  }
4318
4319  // retrieve kernel timing info for that kernel thread
4320  if (!error) {
4321    struct thrdentry64 thrdentry;
4322    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4323      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4324      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4325    } else {
4326      tty->print_cr("pthread_getthrds_np failed.");
4327      error = true;
4328    }
4329  }
4330
4331  if (p_sys_time) {
4332    *p_sys_time = sys_time;
4333  }
4334
4335  if (p_user_time) {
4336    *p_user_time = user_time;
4337  }
4338
4339  if (error) {
4340    return false;
4341  }
4342
4343  return true;
4344}
4345
4346jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4347  jlong sys_time;
4348  jlong user_time;
4349
4350  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4351    return -1;
4352  }
4353
4354  return user_sys_cpu_time ? sys_time + user_time : user_time;
4355}
4356
// Describes to JVMTI the characteristics of current_thread_cpu_time().
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4363
// Describes to JVMTI the characteristics of thread_cpu_time().
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  info_ptr->may_skip_backward = false;     // elapsed time not wall time
  info_ptr->may_skip_forward = false;      // elapsed time not wall time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}
4370
4371bool os::is_thread_cpu_time_supported() {
4372  return true;
4373}
4374
4375// System loadavg support. Returns -1 if load average cannot be obtained.
4376// For now just return the system wide load average (no processor sets).
4377int os::loadavg(double values[], int nelem) {
4378
4379  // Implemented using libperfstat on AIX.
4380
4381  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4382  guarantee(values, "argument error");
4383
4384  if (os::Aix::on_pase()) {
4385    Unimplemented();
4386    return -1;
4387  } else {
4388    // AIX: use libperfstat
4389    //
4390    // See also:
4391    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4392    // /usr/include/libperfstat.h:
4393
4394    // Use the already AIX version independent get_cpuinfo.
4395    os::Aix::cpuinfo_t ci;
4396    if (os::Aix::get_cpuinfo(&ci)) {
4397      for (int i = 0; i < nelem; i++) {
4398        values[i] = ci.loadavg[i];
4399      }
4400    } else {
4401      return -1;
4402    }
4403    return nelem;
4404  }
4405}
4406
4407void os::pause() {
4408  char filename[MAX_PATH];
4409  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4410    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4411  } else {
4412    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4413  }
4414
4415  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4416  if (fd != -1) {
4417    struct stat buf;
4418    ::close(fd);
4419    while (::stat(filename, &buf) == 0) {
4420      (void)::poll(NULL, 0, 100);
4421    }
4422  } else {
4423    jio_fprintf(stderr,
4424      "Could not open pause file '%s', continuing immediately.\n", filename);
4425  }
4426}
4427
4428bool os::Aix::is_primordial_thread() {
4429  if (pthread_self() == (pthread_t)1) {
4430    return true;
4431  } else {
4432    return false;
4433  }
4434}
4435
4436// OS recognitions (PASE/AIX, OS level) call this before calling any
4437// one of Aix::on_pase(), Aix::os_version() static
// Determines via uname() whether we run on AIX or PASE (OS400) and which
// OS level, filling _on_pase and _os_version. Aborts the VM if the OS
// cannot be identified or is older than AIX 5.3. Must run exactly once.
void os::Aix::initialize_os_info() {

  assert(_on_pase == -1 && _os_version == -1, "already called.");

  struct utsname uts;
  memset(&uts, 0, sizeof(uts));
  strcpy(uts.sysname, "?");
  if (::uname(&uts) == -1) {
    fprintf(stderr, "uname failed (%d)\n", errno);
    guarantee(0, "Could not determine whether we run on AIX or PASE");
  } else {
    if (Verbose) {
      fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
              "node \"%s\" machine \"%s\"\n",
              uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
    }
    // On AIX, uname reports the major version in 'version' and the minor
    // version in 'release'.
    const int major = atoi(uts.version);
    assert(major > 0, "invalid OS version");
    const int minor = atoi(uts.release);
    assert(minor > 0, "invalid OS release");
    // Encoded as 0xMMmm, e.g. AIX 5.3 -> 0x0503.
    _os_version = (major << 8) | minor;
    if (strcmp(uts.sysname, "OS400") == 0) {
      // PASE (OS400) is not supported yet.
      Unimplemented();
    } else if (strcmp(uts.sysname, "AIX") == 0) {
      // We run on AIX. We do not support versions older than AIX 5.3.
      _on_pase = 0;
      if (_os_version < 0x0503) {
        fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
        assert(false, "AIX release too old.");
      } else {
        if (Verbose) {
          fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
        }
      }
    } else {
      assert(false, "unknown OS");
    }
  }

  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");

} // end: os::Aix::initialize_os_info()
4480
// Scan environment for important settings which might affect the VM.
// Trace out settings. Warn about invalid settings and/or correct them.
//
// Must run after os::Aix::initialize_os_info().
void os::Aix::scan_environment() {

  char* p;
  int rc;

  // Warn explicitly if EXTSHM=ON is used. That switch changes how
  // System V shared memory behaves. One effect is that page size of
  // shared memory cannot be changed dynamically, effectively preventing
  // large pages from working.
  // This switch was needed on AIX 32bit, but on AIX 64bit the general
  // recommendation is (in OSS notes) to switch it off.
  p = ::getenv("EXTSHM");
  if (Verbose) {
    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
  }
  if (p && strcmp(p, "ON") == 0) {
    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
    _extshm = 1;
  } else {
    _extshm = 0;
  }

  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
  // Not tested, not supported.
  //
  // Note that it might be worth the trouble to test and to require it, if only to
  // get useful return codes for mprotect.
  //
  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
  // exec() ? before loading the libjvm ? ....)
  p = ::getenv("XPG_SUS_ENV");
  if (Verbose) {
    fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
  }
  if (p && strcmp(p, "ON") == 0) {
    _xpg_sus_mode = 1;
    fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
    // clobber address ranges. If we ever want to support that, we have to do some
    // testing first.
    guarantee(false, "XPG_SUS_ENV=ON not supported");
  } else {
    _xpg_sus_mode = 0;
  }

  // Switch off AIX internal (pthread) guard pages. This has
  // immediate effect for any pthread_create calls which follow.
  p = ::getenv("AIXTHREAD_GUARDPAGES");
  if (Verbose) {
    fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
    fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
  }
  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
  guarantee(rc == 0, "");

} // end: os::Aix::scan_environment()
4541
// PASE: initialize the libo4 library (AS400 PASE porting library).
// Not yet implemented; note that initialize_os_info() also hits
// Unimplemented() when it detects OS400, so this path is unreachable today.
void os::Aix::initialize_libo4() {
  Unimplemented();
}
4546
// AIX: initialize the libperfstat library (we load this dynamically
// because it is only available on AIX).
4549void os::Aix::initialize_libperfstat() {
4550
4551  assert(os::Aix::on_aix(), "AIX only");
4552
4553  if (!libperfstat::init()) {
4554    fprintf(stderr, "libperfstat initialization failed.\n");
4555    assert(false, "libperfstat initialization failed");
4556  } else {
4557    if (Verbose) {
4558      fprintf(stderr, "libperfstat initialized.\n");
4559    }
4560  }
4561} // end: os::Aix::initialize_libperfstat
4562
4563/////////////////////////////////////////////////////////////////////////////
4564// thread stack
4565
4566// function to query the current stack size using pthread_getthrds_np
4567//
4568// ! do not change anything here unless you know what you are doing !
// Query base and size of the current pthread's stack via pthread_getthrds_np.
// Either out parameter may be NULL if the caller is not interested.
// The returned base is rounded up to the stack page size and the size is
// reduced by one page accordingly.
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {

  // This only works when invoked on a pthread. As we agreed not to use
  // primordial threads anyway, I assert here
  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");

  // information about this api can be found (a) in the pthread.h header and
  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  //
  // The use of this API to find out the current stack is kind of undefined.
  // But after a lot of tries and asking IBM about it, I concluded that it is safe
  // enough for cases where I let the pthread library create its stacks. For cases
  // where I create an own stack and pass this to pthread_create, it seems not to
  // work (the returned stack size in that case is 0).

  pthread_t tid = pthread_self();
  struct __pthrdsinfo pinfo;
  // Register-save buffer: only needed to satisfy the API - presumably a NULL
  // buffer would yield an error return. TODO confirm which errno that would be.
  char dummy[1];
  int dummy_size = sizeof(dummy);

  memset(&pinfo, 0, sizeof(pinfo));

  const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
                                      sizeof(pinfo), dummy, &dummy_size);

  if (rc != 0) {
    fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
    guarantee(0, "pthread_getthrds_np failed");
  }

  guarantee(pinfo.__pi_stackend, "returned stack base invalid");

  // the following can happen when invoking pthread_getthrds_np on a pthread running on a user provided stack
  // (when handing down a stack to pthread create, see pthread_attr_setstackaddr).
  // Not sure what to do here - I feel inclined to forbid this use case completely.
  guarantee(pinfo.__pi_stacksize, "returned stack size invalid");

  // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
  if (p_stack_base) {
    (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
  }

  if (p_stack_size) {
    // Shrink by one page - presumably to compensate for the rounded-up base
    // above, so base-size stays inside the real stack. TODO confirm.
    (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
  }

#ifndef PRODUCT
  if (Verbose) {
    fprintf(stderr,
            "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
            ", real stack_size=" INTPTR_FORMAT
            ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
            (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
            (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
            pinfo.__pi_stacksize - os::Aix::stack_page_size());
  }
#endif

} // end query_stack_dimensions
4628
4629// get the current stack base from the OS (actually, the pthread library)
4630address os::current_stack_base() {
4631  address p;
4632  query_stack_dimensions(&p, 0);
4633  return p;
4634}
4635
4636// get the current stack size from the OS (actually, the pthread library)
4637size_t os::current_stack_size() {
4638  size_t s;
4639  query_stack_dimensions(0, &s);
4640  return s;
4641}
4642
4643// Refer to the comments in os_solaris.cpp park-unpark.
4644//
4645// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4646// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4647// For specifics regarding the bug see GLIBC BUGID 261237 :
4648//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4649// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4650// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4651// is used. (The simple C test-case provided in the GLIBC bug report manifests the
// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4653// and monitorenter when we're using 1-0 locking. All those operations may result in
4654// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4655// of libpthread avoids the problem, but isn't practical.
4656//
4657// Possible remedies:
4658//
4659// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4660//      This is palliative and probabilistic, however. If the thread is preempted
4661//      between the call to compute_abstime() and pthread_cond_timedwait(), more
4662//      than the minimum period may have passed, and the abstime may be stale (in the
//      past) resulting in a hang. Using this technique reduces the odds of a hang
4664//      but the JVM is still vulnerable, particularly on heavily loaded systems.
4665//
4666// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4667//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4668//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4669//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4670//      thread.
4671//
4672// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4673//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4674//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4675//      This also works well. In fact it avoids kernel-level scalability impediments
4676//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4677//      timers in a graceful fashion.
4678//
4679// 4.   When the abstime value is in the past it appears that control returns
4680//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4681//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4682//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4683//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4684//      It may be possible to avoid reinitialization by checking the return
4685//      value from pthread_cond_timedwait(). In addition to reinitializing the
4686//      condvar we must establish the invariant that cond_signal() is only called
4687//      within critical sections protected by the adjunct mutex. This prevents
4688//      cond_signal() from "seeing" a condvar that's in the midst of being
4689//      reinitialized or that is corrupt. Sadly, this invariant obviates the
4690//      desirable signal-after-unlock optimization that avoids futile context switching.
4691//
//      I'm also concerned that some versions of NPTL might allocate an auxiliary
//      structure when a condvar is used or initialized. cond_destroy() would
//      release the helper structure. Our reinitialize-after-timedwait fix
//      put excessive stress on malloc/free and locks protecting the c-heap.
//
// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
// It may be possible to refine (4) by checking the kernel and NPTL versions
4699// and only enabling the work-around for vulnerable environments.
4700
4701// utility to compute the abstime argument to timedwait:
4702// millis is the relative timeout time
4703// abstime will be the absolute timeout time
4704// TODO: replace compute_abstime() with unpackTime()
4705
4706static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4707  if (millis < 0) millis = 0;
4708  struct timeval now;
4709  int status = gettimeofday(&now, NULL);
4710  assert(status == 0, "gettimeofday");
4711  jlong seconds = millis / 1000;
4712  millis %= 1000;
4713  if (seconds > 50000000) { // see man cond_timedwait(3T)
4714    seconds = 50000000;
4715  }
4716  abstime->tv_sec = now.tv_sec  + seconds;
4717  long       usec = now.tv_usec + millis * 1000;
4718  if (usec >= 1000000) {
4719    abstime->tv_sec += 1;
4720    usec -= 1000000;
4721  }
4722  abstime->tv_nsec = usec * 1000;
4723  return abstime;
4724}
4725
4726
4727// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4728// Conceptually TryPark() should be equivalent to park(0).
4729
int os::PlatformEvent::TryPark() {
  // Atomically swap _Event to 0 and return its previous value (0 or 1).
  // The CAS loop retries until the value read is still current at the
  // moment of the exchange.
  for (;;) {
    const int v = _Event;
    guarantee ((v == 0) || (v == 1), "invariant");
    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
  }
}
4737
void os::PlatformEvent::park() {       // AKA "down()"
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // TODO: assert that _Assoc != NULL or _Assoc == Self
  int v;
  // Atomically decrement _Event. A preceding unpark() leaves _Event at 1,
  // in which case we consume the permit and return without blocking.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v == 0) {
    // Do this the hard way by blocking ...
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    guarantee (_nParked == 0, "invariant");
    ++ _nParked;
    // _Event is now -1; wait until unpark() raises it back to >= 0.
    // The loop also filters spurious wakeups from pthread_cond_wait.
    while (_Event < 0) {
      status = pthread_cond_wait(_cond, _mutex);
      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
    }
    -- _nParked;

    // In theory we could move the ST of 0 into _Event past the unlock(),
    // but then we'd need a MEMBAR after the ST.
    _Event = 0;
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }
  guarantee (_Event >= 0, "invariant");
}
4768
// Timed variant of park(): blocks for at most 'millis' milliseconds.
// Returns OS_OK if the event was set (or a permit was pending), OS_TIMEOUT
// if the wait timed out.
int os::PlatformEvent::park(jlong millis) {
  guarantee (_nParked == 0, "invariant");

  int v;
  // Atomically decrement _Event; a pending unpark() (v == 1) is consumed
  // and we return immediately.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  }
  guarantee (v >= 0, "invariant");
  if (v != 0) return OS_OK;

  // We do this the hard way, by blocking the thread.
  // Consider enforcing a minimum timeout value.
  struct timespec abst;
  compute_abstime(&abst, millis);

  int ret = OS_TIMEOUT;
  int status = pthread_mutex_lock(_mutex);
  assert_status(status == 0, status, "mutex_lock");
  guarantee (_nParked == 0, "invariant");
  ++_nParked;

  // Object.wait(timo) will return because of
  // (a) notification
  // (b) timeout
  // (c) thread.interrupt
  //
  // Thread.interrupt and object.notify{All} both call Event::set.
  // That is, we treat thread.interrupt as a special case of notification.
  // The underlying Solaris implementation, cond_timedwait, admits
  // spurious/premature wakeups, but the JLS/JVM spec prevents the
  // JVM from making those visible to Java code. As such, we must
  // filter out spurious wakeups. We assume all ETIME returns are valid.
  //
  // TODO: properly differentiate simultaneous notify+interrupt.
  // In that case, we should propagate the notify to another waiter.

  while (_Event < 0) {
    status = pthread_cond_timedwait(_cond, _mutex, &abst);
    assert_status(status == 0 || status == ETIMEDOUT,
          status, "cond_timedwait");
    if (!FilterSpuriousWakeups) break;         // previous semantics
    if (status == ETIMEDOUT) break;
    // We consume and ignore EINTR and spurious wakeups.
  }
  --_nParked;
  // _Event >= 0 here means unpark() ran while we waited -> success.
  if (_Event >= 0) {
     ret = OS_OK;
  }
  _Event = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "mutex_unlock");
  assert (_nParked == 0, "invariant");
  return ret;
}
4824
// Set the event: raise _Event towards 1 and wake a thread blocked in park().
void os::PlatformEvent::unpark() {
  int v, AnyWaiters;
  for (;;) {
    v = _Event;
    if (v > 0) {
      // The LD of _Event could have reordered or be satisfied
      // by a read-aside from this processor's write buffer.
      // To avoid problems execute a barrier and then
      // ratify the value.
      OrderAccess::fence();
      if (_Event == v) return;
      continue;
    }
    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  }
  // v < 0 means a thread is (or was) blocked in park() - wake it.
  if (v < 0) {
    // Wait for the thread associated with the event to vacate
    int status = pthread_mutex_lock(_mutex);
    assert_status(status == 0, status, "mutex_lock");
    AnyWaiters = _nParked;

    if (AnyWaiters != 0) {
      // NOTE(review): the original comment claimed we signal *after*
      // dropping the lock, but the code below signals while still holding
      // the mutex (the unlock follows). Signalling under the mutex is
      // valid; it just forgoes the futile-wakeup optimization.
      status = pthread_cond_signal(_cond);
      assert_status(status == 0, status, "cond_signal");
    }
    // Mutex should be locked for pthread_cond_signal(_cond).
    status = pthread_mutex_unlock(_mutex);
    assert_status(status == 0, status, "mutex_unlock");
  }

  // A spurious wakeup induced by this unpark() is benign: a thread woken
  // from cond_{timed}wait() simply re-tests the condition and re-parks
  // itself if the event is not set.
}
4863
4864
4865// JSR166
4866// -------------------------------------------------------
4867
4868//
4869// The solaris and linux implementations of park/unpark are fairly
4870// conservative for now, but can be improved. They currently use a
// mutex/condvar pair, plus a count.
4872// Park decrements count if > 0, else does a condvar wait. Unpark
4873// sets count to 1 and signals condvar. Only one thread ever waits
4874// on the condvar. Contention seen when trying to park implies that someone
4875// is unparking you, so don't wait. And spurious returns are fine, so there
4876// is no need to track notifications.
4877//
4878
4879#define MAX_SECS 100000000
4880//
4881// This code is common to linux and solaris and will be moved to a
4882// common place in dolphin.
4883//
4884// The passed in time value is either a relative time in nanoseconds
4885// or an absolute time in milliseconds. Either way it has to be unpacked
4886// into suitable seconds and nanoseconds components and stored in the
4887// given timespec structure.
4888// Given time is a 64-bit value and the time_t used in the timespec is only
4889// a signed-32-bit value (except on 64-bit Linux) we have to watch for
4890// overflow if times way in the future are given. Further on Solaris versions
4891// prior to 10 there is a restriction (see cond_timedwait) that the specified
4892// number of seconds, in abstime, is less than current_time + 100,000,000.
4893// As it will be 28 years before "now + 100000000" will overflow we can
4894// ignore overflow and just impose a hard-limit on seconds using the value
4895// of "now + 100,000,000". This places a limit on the timeout of about 3.17
4896// years from "now".
4897//
4898
// Unpack the given time value into 'absTime' as an absolute deadline
// suitable for pthread_cond_timedwait.
// 'time' is a relative timeout in nanoseconds when isAbsolute is false,
// or an absolute deadline in milliseconds since the epoch when true.
// The result is clamped to now + MAX_SECS (see the block comment above).
static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  assert (time > 0, "convertTime");

  struct timeval now;
  int status = gettimeofday(&now, NULL);
  assert(status == 0, "gettimeofday");

  // Hard upper bound on the deadline, about 3.17 years from now.
  time_t max_secs = now.tv_sec + MAX_SECS;

  if (isAbsolute) {
    // 'time' is already an absolute wall-clock value in milliseconds.
    jlong secs = time / 1000;
    if (secs > max_secs) {
      absTime->tv_sec = max_secs;
    }
    else {
      absTime->tv_sec = secs;
    }
    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  }
  else {
    // 'time' is a relative timeout in nanoseconds; add it to 'now'.
    jlong secs = time / NANOSECS_PER_SEC;
    if (secs >= MAX_SECS) {
      absTime->tv_sec = max_secs;
      absTime->tv_nsec = 0;
    }
    else {
      absTime->tv_sec = now.tv_sec + secs;
      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
        absTime->tv_nsec -= NANOSECS_PER_SEC;
        ++absTime->tv_sec; // note: this must be <= max_secs
      }
    }
  }
  assert(absTime->tv_sec >= 0, "tv_sec < 0");
  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
}
4938
// Block the current thread until unpark() is called, the timeout expires,
// or the thread is interrupted. 'time' is a relative timeout in nanoseconds
// when isAbsolute is false, an absolute deadline in milliseconds when true;
// time == 0 with isAbsolute == false means wait indefinitely.
void Parker::park(bool isAbsolute, jlong time) {
  // Optional fast-path check:
  // Return immediately if a permit is available.
  if (_counter > 0) {
      _counter = 0;
      OrderAccess::fence();
      return;
  }

  Thread* thread = Thread::current();
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Optional optimization -- avoid state transitions if there's an interrupt pending.
  // Check interrupt before trying to wait
  if (Thread::is_interrupted(thread, false)) {
    return;
  }

  // Next, demultiplex/decode time arguments
  timespec absTime;
  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
    return;
  }
  if (time > 0) {
    unpackTime(&absTime, isAbsolute, time);
  }


  // Enter safepoint region
  // Beware of deadlocks such as 6317397.
  // The per-thread Parker:: mutex is a classic leaf-lock.
  // In particular a thread must never block on the Threads_lock while
  // holding the Parker:: mutex. If safepoints are pending both the
  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  ThreadBlockInVM tbivm(jt);

  // Don't wait if cannot get lock since interference arises from
  // unblocking. Also. check interrupt before trying wait
  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
    return;
  }

  int status;
  if (_counter > 0) { // no wait needed
    _counter = 0;
    status = pthread_mutex_unlock(_mutex);
    assert (status == 0, "invariant");
    OrderAccess::fence();
    return;
  }

#ifdef ASSERT
  // Don't catch signals while blocked; let the running threads have the signals.
  // (This allows a debugger to break into the running thread.)
  sigset_t oldsigs;
  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
#endif

  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  jt->set_suspend_equivalent();
  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()

  if (time == 0) {
    status = pthread_cond_wait (_cond, _mutex);
  } else {
    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
    // Reinitialize the condvar after a failed timedwait to avoid the NPTL
    // stale-abstime corruption hang - see remedy (4) in the block comment
    // further up and the WorkAroundNPTLTimedWaitHang flag.
    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
      pthread_cond_destroy (_cond);
      pthread_cond_init    (_cond, NULL);
    }
  }
  assert_status(status == 0 || status == EINTR ||
                status == ETIME || status == ETIMEDOUT,
                status, "cond_timedwait");

#ifdef ASSERT
  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
#endif

  // Consume any permit and release the lock.
  _counter = 0;
  status = pthread_mutex_unlock(_mutex);
  assert_status(status == 0, status, "invariant");
  // If externally suspended while waiting, re-suspend
  if (jt->handle_special_suspend_equivalent_condition()) {
    jt->java_suspend_self();
  }

  OrderAccess::fence();
}
5030
5031void Parker::unpark() {
5032  int s, status;
5033  status = pthread_mutex_lock(_mutex);
5034  assert (status == 0, "invariant");
5035  s = _counter;
5036  _counter = 1;
5037  if (s < 1) {
5038    if (WorkAroundNPTLTimedWaitHang) {
5039      status = pthread_cond_signal (_cond);
5040      assert (status == 0, "invariant");
5041      status = pthread_mutex_unlock(_mutex);
5042      assert (status == 0, "invariant");
5043    } else {
5044      status = pthread_mutex_unlock(_mutex);
5045      assert (status == 0, "invariant");
5046      status = pthread_cond_signal (_cond);
5047      assert (status == 0, "invariant");
5048    }
5049  } else {
5050    pthread_mutex_unlock(_mutex);
5051    assert (status == 0, "invariant");
5052  }
5053}
5054
5055
5056extern char** environ;
5057
5058// Run the specified command in a separate process. Return its exit value,
5059// or -1 on failure (e.g. can't fork a new process).
5060// Unlike system(), this function can be called from signal handler. It
5061// doesn't block SIGINT et al.
int os::fork_and_exec(char* cmd) {
  // Not yet implemented on AIX.
  Unimplemented();
  return 0; // not reached if Unimplemented() aborts - presumably it does; TODO confirm
}
5066
5067// is_headless_jre()
5068//
5069// Test for the existence of xawt/libmawt.so or libawt_xawt.so
5070// in order to report if we are running in a headless jre.
5071//
5072// Since JDK8 xawt/libmawt.so is moved into the same directory
5073// as libawt.so, and renamed libawt_xawt.so
5074bool os::is_headless_jre() {
5075  struct stat statbuf;
5076  char buf[MAXPATHLEN];
5077  char libmawtpath[MAXPATHLEN];
5078  const char *xawtstr  = "/xawt/libmawt.so";
5079  const char *new_xawtstr = "/libawt_xawt.so";
5080
5081  char *p;
5082
5083  // Get path to libjvm.so
5084  os::jvm_path(buf, sizeof(buf));
5085
5086  // Get rid of libjvm.so
5087  p = strrchr(buf, '/');
5088  if (p == NULL) return false;
5089  else *p = '\0';
5090
5091  // Get rid of client or server
5092  p = strrchr(buf, '/');
5093  if (p == NULL) return false;
5094  else *p = '\0';
5095
5096  // check xawt/libmawt.so
5097  strcpy(libmawtpath, buf);
5098  strcat(libmawtpath, xawtstr);
5099  if (::stat(libmawtpath, &statbuf) == 0) return false;
5100
5101  // check libawt_xawt.so
5102  strcpy(libmawtpath, buf);
5103  strcat(libmawtpath, new_xawtstr);
5104  if (::stat(libmawtpath, &statbuf) == 0) return false;
5105
5106  return true;
5107}
5108
5109// Get the default path to the core file
5110// Returns the length of the string
5111int os::get_core_path(char* buffer, size_t bufferSize) {
5112  const char* p = get_current_directory(buffer, bufferSize);
5113
5114  if (p == NULL) {
5115    assert(p != NULL, "failed to get current directory");
5116    return 0;
5117  }
5118
5119  return strlen(buffer);
5120}
5121
#ifndef PRODUCT
// Internal VM test hook for special memory reservation (large pages);
// intentionally empty - presumably no testable path exists on this platform.
void TestReserveMemorySpecial_test() {
  // No tests available for this platform
}
#endif
5127