os_aix.cpp revision 6628:45831d971923
/*
 * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS documentation, #pragma alloca must be used
// with the C++ compiler before referencing alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Add missing declarations (should be in procinfo.h but isn't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

// Excerpts from systemcfg.h definitions newer than AIX 5.3
#ifndef PV_7
# define PV_7 0x200000          // Power PC 7
# define PV_7_Compat 0x208000   // Power PC 7
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are that codeptr_t is a *real code pointer*.
// This means that any function taking codeptr_t as an argument will assume
// a real code pointer and won't handle function descriptors (e.g. getFuncName),
// whereas functions taking an address as argument will deal with function
// descriptors (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
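
// For reference: on AIX, a C/C++ function pointer does not point at code but
// at a three-word function descriptor residing in the data segment of the
// defining module. A sketch of its layout (cf. the FunctionDescriptor class
// used in resolve_function_descriptor_to_code_pointer() below):
//
//   entry : address of the first instruction (the real code pointer)
//   toc   : TOC anchor of the defining module
//   env   : environment pointer (unused by C/C++)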

// typedefs for stackslots, stack pointers, pointers to op codes
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}
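
// Example (illustrative values only): for stack_base == 0x30000000 and
// stack_size == 0x10000, any 8-byte aligned sp within the inclusive range
// [0x2FFF0000, 0x30000000] is accepted; stacks grow downwards from stack_base.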

// returns true if function is a valid codepointer
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// macro to check a given stack pointer against given stack limits and to die if the test fails
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// macro to check the current stack pointer against given stack limits
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
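
// Typical use (see java_start() and os::create_attached_thread() below):
//
//   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());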

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
size_t    os::Aix::_shm_default_page_size = -1;
int       os::Aix::_can_use_64K_pages = -1;
int       os::Aix::_can_use_16M_pages = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls.

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

////////////////////////////////////////////////////////////////////////////////
// environment support

bool os::getenv(const char* name, char* buf, int len) {
  const char* val = ::getenv(name);
  if (val != NULL && strlen(val) < (size_t)len) {
    strcpy(buf, val);
    return true;
  }
  if (len > 0) buf[0] = 0;  // return a null string
  return false;
}

// Returns true if the process runs with special privileges, i.e. if its
// effective uid/gid differ from its real uid/gid (setuid/setgid executable).
bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size a 32bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x80000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
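
// Worked example (illustrative only): disclaiming a 5 GB range
// (size == 0x140000000) issues two full 2 GB disclaims
// (numFullDisclaimsNeeded == 2) followed by one 1 GB remainder disclaim
// (lastDisclaimSize == 0x40000000).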

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif

// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}
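
// Example use (as done in query_multipage_support() below): the page size
// backing the current thread's stack can be queried via a local variable:
//
//   int dummy = 0;
//   const size_t stack_page_size = os::Aix::query_pagesize(&dummy);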

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // get the number of online (logical) cpus instead of configured
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // retrieve total physical storage
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Retrieve information about multipage size support. Will initialize
// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
// Aix::_can_use_16M_pages.
// Must be called before calling os::large_page_init().
void os::Aix::query_multipage_support() {

  guarantee(_page_size == -1 &&
            _stack_page_size == -1 &&
            _can_use_64K_pages == -1 &&
            _can_use_16M_pages == -1 &&
            g_multipage_error == -1,
            "do not call twice");

  _page_size = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(_page_size == SIZE_4K, "surprise!");

  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is influenced either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // default should be 4K.
  size_t data_page_size = SIZE_4K;
  {
    void* p = ::malloc(SIZE_16M);
    guarantee(p != NULL, "malloc failed");
    data_page_size = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // query default shm page size (LDR_CNTRL SHMPSIZE)
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    _shm_default_page_size = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // before querying the stack page size, make sure we are not running as primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons, so we may just as well guarantee it here.
  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");

  // query stack page size
  {
    int dummy = 0;
    _stack_page_size = os::Aix::query_pagesize(&dummy);
    // everything else would surprise me and should be looked into
    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
    // also, just for completeness: pthread stacks are allocated from C heap, so
    // stack page size should be the same as data page size
    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
  }

  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
  // for System V shm.
  if (Aix::extshm()) {
    if (Verbose) {
      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
                      "Please make sure EXTSHM is OFF for large page support.\n");
    }
    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
    _can_use_64K_pages = _can_use_16M_pages = 0;
    goto query_multipage_support_end;
  }

  // now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      if (Verbose) {
        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
        fprintf(stderr, "disabling multipage support.\n");
      }
      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
      _can_use_64K_pages = _can_use_16M_pages = 0;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    if (Verbose) {
      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
      for (int i = 0; i < num_psizes; i++) {
        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
      }
      fprintf(stderr, " .\n");
    }

    // Can we use 64K, 16M pages?
    _can_use_64K_pages = 0;
    _can_use_16M_pages = 0;
    for (int i = 0; i < num_psizes; i++) {
      if (sizes[i] == SIZE_64K) {
        _can_use_64K_pages = 1;
      } else if (sizes[i] == SIZE_16M) {
        _can_use_16M_pages = 1;
      }
    }

    if (!_can_use_64K_pages) {
      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
    }

    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
    // there must be an actual 16M page pool, and we must run with enough rights.
    if (_can_use_16M_pages) {
      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee(shmid != -1, "shmget failed");
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = SIZE_16M;
      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0;
      const int en = errno;
      ::shmctl(shmid, IPC_RMID, NULL);
      if (!can_set_pagesize) {
        if (Verbose) {
          fprintf(stderr, "Failed to allocate even one measly 16M page. shmctl failed with %d (%s).\n"
                          "Will deactivate 16M support.\n", en, strerror(en));
        }
        _can_use_16M_pages = 0;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  guarantee(_page_size != -1 &&
            _stack_page_size != -1 &&
            _can_use_64K_pages != -1 &&
            _can_use_16M_pages != -1, "Page sizes not properly initialized");

  if (_can_use_64K_pages) {
    g_multipage_error = 0;
  }

  if (Verbose) {
    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
    fprintf(stderr, "Can use 64K pages dynamically with shared memory: %s\n", (_can_use_64K_pages ? "yes" : "no"));
    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" : "no"));
    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
  }

} // end os::Aix::query_multipage_support()

// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,  // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR), // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR)); // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  return ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN);
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks with respect to these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs.)
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage) {
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  }
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow debugger in
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// retrieve memory information.
// Returns false if something went wrong;
// content of pmi undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset(&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset(&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy(pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} //end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is the kernel thread id (similar to a Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let thread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "pthread_continue_np failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

// For now, we say that AIX does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).

bool os::supports_vtime() { return false; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
double os::elapsedVTime() {
  // better than nothing, but not much
  return elapsedTime();
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (they probably only ever tested in C, not C++)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time base register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;
  // mread_real_time() is monotonic (see 'os::javaTimeNanos()')
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;
  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}

// Note: os::abort() might be called very early during initialization, or
// called from signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// Unused on Aix for now.
void os::set_error_file(const char *logfile) {}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {

  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }
int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
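
// Examples (illustrative values): with an empty pname,
// dll_build_name(buf, buflen, "", "net") yields "libnet.so"; with
// pname "/usr/lib" it yields "/usr/lib/libnet.so"; a pname containing
// path separators is searched entry by entry for an existing file.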

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib == NULL) {
    lib = LoadedLibraries::find_for_data_address(addr);
  }
  return lib != NULL && strcmp(lib->get_shortname(), "libjvm.so") == 0;
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // it's a real code pointer
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // it's a function descriptor
        return code_entry;
      }
    }
  }
  return NULL;
}
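
// Illustrative use (hypothetical input): a function pointer literal such as
// CAST_FROM_FN_PTR(address, os::jvm_path) points at a descriptor in the data
// segment and resolves to its code entry point here, whereas a pc sampled
// from a register already lies in a text segment and is returned unchanged.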

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: module name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      sprintf(p_name, "%.*s", (int) namelen, lib->get_shortname());
    }
    return 0;
  }

  if (Verbose) {
    fprintf(stderr, "pc outside any module\n");
  }

  return -1;

}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads .dll/.so and in case of error it checks if .dll/.so was built
// for the same architecture as Hotspot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The library is loaded immediately with all its dependents.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

// Glibc-2.0 libdl is not MT safe. If you are building with any glibc,
// chances are you might want to run the generated bits against glibc-2.0
// libdl.so, so always use locking for any version of glibc.
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print("%s ", name.sysname);
  st->print("%s ", name.nodename);
  st->print("%s ", name.release);
  st->print("%s ", name.version);
  st->print("%s", name.machine);
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%lluk", (unsigned long long) (rlim.rlim_cur >> 10));

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%lluk", (unsigned long long) (rlim.rlim_cur >> 10));

  st->print(", NPROC ");
  st->print("%ld", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%llu", (unsigned long long) rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%lluk", (unsigned long long) (rlim.rlim_cur >> 10));

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%lluk", (unsigned long long) (rlim.rlim_cur >> 10));
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}

void os::print_memory_info(outputStream* st) {

  st->print_cr("Memory:");

  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
  st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" : "no"));
  st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" : "no"));
  if (g_multipage_error != 0) {
    st->print_cr("  multipage error: %d", g_multipage_error);
  }

  // print out LDR_CNTRL because it affects the default page sizes
  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");

  const char* const extshm = ::getenv("EXTSHM");
  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");

  // Call os::Aix::get_meminfo() to retrieve memory statistics.
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    char buffer[256];
    if (os::Aix::on_aix()) {
      jio_snprintf(buffer, sizeof(buffer),
                   "  physical total : %llu\n"
                   "  physical free  : %llu\n"
                   "  swap total     : %llu\n"
                   "  swap free      : %llu\n",
                   mi.real_total,
                   mi.real_free,
                   mi.pgsp_total,
                   mi.pgsp_free);
    } else {
      Unimplemented();
    }
    st->print_raw(buffer);
  } else {
    st->print_cr("  (no more information available)");
  }
}

void os::pd_print_cpu_info(outputStream* st) {
  // cpu
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::cpu_features());
  st->cr();
}

void os::print_siginfo(outputStream* st, void* siginfo) {
  // Use common posix version.
  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
  st->cr();
}

static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS, buf, buflen);
  print_signal_handler(st, SIGFPE, buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL, buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  print_signal_handler(st, SIGDANGER, buf, buflen);
}
1621
1622static char saved_jvm_path[MAXPATHLEN] = {0};
1623
1624// Find the full path to the current module, libjvm.so or libjvm_g.so
1625void os::jvm_path(char *buf, jint buflen) {
1626  // Error checking.
1627  if (buflen < MAXPATHLEN) {
1628    assert(false, "must use a large-enough buffer");
1629    buf[0] = '\0';
1630    return;
1631  }
1632  // Lazy resolve the path to current module.
1633  if (saved_jvm_path[0] != 0) {
1634    strcpy(buf, saved_jvm_path);
1635    return;
1636  }
1637
1638  Dl_info dlinfo;
1639  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
1640  assert(ret != 0, "cannot locate libjvm");
1641  char* rp = realpath((char *)dlinfo.dli_fname, buf);
1642  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
1643
1644  strcpy(saved_jvm_path, buf);
1645}
1646
1647void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1648  // no prefix required, not even "_"
1649}
1650
1651void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1652  // no suffix required
1653}
1654
1655////////////////////////////////////////////////////////////////////////////////
1656// sun.misc.Signal support
1657
1658static volatile jint sigint_count = 0;
1659
1660static void
1661UserHandler(int sig, void *siginfo, void *context) {
1662  // 4511530 - sem_post is serialized and handled by the manager thread. When
1663  // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1664  // don't want to flood the manager thread with sem_post requests.
1665  if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1666    return;
1667
1668  // Ctrl-C is pressed during error reporting, likely because the error
1669  // handler fails to abort. Let VM die immediately.
1670  if (sig == SIGINT && is_error_reported()) {
1671    os::die();
1672  }
1673
1674  os::signal_notify(sig);
1675}
1676
1677void* os::user_handler() {
1678  return CAST_FROM_FN_PTR(void*, UserHandler);
1679}
1680
1681extern "C" {
1682  typedef void (*sa_handler_t)(int);
1683  typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1684}
1685
1686void* os::signal(int signal_number, void* handler) {
1687  struct sigaction sigAct, oldSigAct;
1688
1689  sigfillset(&(sigAct.sa_mask));
1690
1691  // Do not block out synchronous signals in the signal handler.
1692  // Blocking synchronous signals only makes sense if you can really
1693  // be sure that those signals won't happen during signal handling,
1694  // when the blocking applies.  Normal signal handlers are lean and
1695  // do not cause signals. But our signal handlers tend to be "risky"
1696  // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
1697  // On AIX, PASE there was a case where a SIGSEGV happened, followed
1698  // by a SIGILL, which was blocked due to the signal mask. The process
1699  // just hung forever. Better to crash from a secondary signal than to hang.
1700  sigdelset(&(sigAct.sa_mask), SIGSEGV);
1701  sigdelset(&(sigAct.sa_mask), SIGBUS);
1702  sigdelset(&(sigAct.sa_mask), SIGILL);
1703  sigdelset(&(sigAct.sa_mask), SIGFPE);
1704  sigdelset(&(sigAct.sa_mask), SIGTRAP);
1705
1706  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
1707
1708  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1709
1710  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1711    // -1 means registration failed
1712    return (void *)-1;
1713  }
1714
1715  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1716}
1717
1718void os::signal_raise(int signal_number) {
1719  ::raise(signal_number);
1720}
1721
1722//
1723// The following code is moved from os.cpp for making this
1724// code platform specific, which it is by its very nature.
1725//
1726
1727// Will be modified when max signal is changed to be dynamic
1728int os::sigexitnum_pd() {
1729  return NSIG;
1730}
1731
1732// a counter for each possible signal value
1733static volatile jint pending_signals[NSIG+1] = { 0 };
1734
1735  // POSIX-specific handshaking semaphore.
1736static sem_t sig_sem;
1737
1738void os::signal_init_pd() {
1739  // Initialize signal structures
1740  ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1741
1742  // Initialize signal semaphore
1743  int rc = ::sem_init(&sig_sem, 0, 0);
1744  guarantee(rc != -1, "sem_init failed");
1745}
1746
1747void os::signal_notify(int sig) {
1748  Atomic::inc(&pending_signals[sig]);
1749  ::sem_post(&sig_sem);
1750}
1751
1752static int check_pending_signals(bool wait) {
1753  Atomic::store(0, &sigint_count);
1754  for (;;) {
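        // Atomically claim one pending occurrence of signal i: the cmpxchg only
        // succeeds if the counter still holds the value we just sampled, so two
        // concurrent callers cannot consume the same occurrence twice.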
1755    for (int i = 0; i < NSIG + 1; i++) {
1756      jint n = pending_signals[i];
1757      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1758        return i;
1759      }
1760    }
1761    if (!wait) {
1762      return -1;
1763    }
1764    JavaThread *thread = JavaThread::current();
1765    ThreadBlockInVM tbivm(thread);
1766
1767    bool threadIsSuspended;
1768    do {
1769      thread->set_suspend_equivalent();
1770      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1771
1772      ::sem_wait(&sig_sem);
1773
1774      // were we externally suspended while we were waiting?
1775      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1776      if (threadIsSuspended) {
1777        //
1778        // The semaphore has been incremented, but while we were waiting
1779        // another thread suspended us. We don't want to continue running
1780        // while suspended because that would surprise the thread that
1781        // suspended us.
1782        //
1783        ::sem_post(&sig_sem);
1784
1785        thread->java_suspend_self();
1786      }
1787    } while (threadIsSuspended);
1788  }
1789}
1790
1791int os::signal_lookup() {
1792  return check_pending_signals(false);
1793}
1794
1795int os::signal_wait() {
1796  return check_pending_signals(true);
1797}
1798
1799////////////////////////////////////////////////////////////////////////////////
1800// Virtual Memory
1801
1802// AddrRange describes an immutable address range
1803//
1804// This is a helper class for the 'shared memory bookkeeping' below.
1805class AddrRange {
1806  friend class ShmBkBlock;
1807
1808  char* _start;
1809  size_t _size;
1810
1811public:
1812
1813  AddrRange(char* start, size_t size)
1814    : _start(start), _size(size)
1815  {}
1816
1817  AddrRange(const AddrRange& r)
1818    : _start(r.start()), _size(r.size())
1819  {}
1820
1821  char* start() const { return _start; }
1822  size_t size() const { return _size; }
1823  char* end() const { return _start + _size; }
1824  bool is_empty() const { return _size == 0 ? true : false; }
1825
1826  static AddrRange empty_range() { return AddrRange(NULL, 0); }
1827
1828  bool contains(const char* p) const {
1829    return start() <= p && end() > p;
1830  }
1831
1832  bool contains(const AddrRange& range) const {
1833    return start() <= range.start() && end() >= range.end();
1834  }
1835
1836  bool intersects(const AddrRange& range) const {
1837    return (range.start() <= start() && range.end() > start()) ||
1838           (range.start() < end() && range.end() >= end()) ||
1839           contains(range);
1840  }
1841
1842  bool is_same_range(const AddrRange& range) const {
1843    return start() == range.start() && size() == range.size();
1844  }
1845
1846  // return the closest inside range consisting of whole pages
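      // Example: for _start = 0x1100 and _size = 0x3000 (so end() = 0x4100),
      // a pagesize of 0x1000 yields the inner aligned range
      // [0x2000 .. 0x4000) - two whole pages.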
1847  AddrRange find_closest_aligned_range(size_t pagesize) const {
1848    if (pagesize == 0 || is_empty()) {
1849      return empty_range();
1850    }
1851    char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
1852    char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
1853    if (from > to) {
1854      return empty_range();
1855    }
1856    return AddrRange(from, to - from);
1857  }
1858};
1859
1860////////////////////////////////////////////////////////////////////////////
1861// shared memory bookkeeping
1862//
1863// the os::reserve_memory() API and friends hand out different kind of memory, depending
1864// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
1865//
1866// But these memory types have to be treated differently. For example, to uncommit
1867// mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
1868// disclaim64() is needed.
1869//
1870// Therefore we need to keep track of the allocated memory segments and their
1871// properties.
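      //
      // The distinction is encapsulated in the ShmBkBlock::disclaim() virtual below:
      // the mmap flavor uses msync(MS_INVALIDATE), the shmat flavor uses disclaim64()
      // (via my_disclaim64), and pinned (16M page) segments are not disclaimed at all.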
1872
1873// ShmBkBlock: base class for all blocks in the shared memory bookkeeping
1874class ShmBkBlock : public CHeapObj<mtInternal> {
1875
1876  ShmBkBlock* _next;
1877
1878protected:
1879
1880  AddrRange _range;
1881  const size_t _pagesize;
1882  const bool _pinned;
1883
1884public:
1885
1886  ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
1887    : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
1888
1889    assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
1890    assert(!_range.is_empty(), "invalid range");
1891  }
1892
1893  virtual void print(outputStream* st) const {
1894    st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
1895              _range.start(), _range.end(), _range.size(),
1896              _range.size() / _pagesize, describe_pagesize(_pagesize),
1897              _pinned ? "pinned" : "");
1898  }
1899
1900  enum Type { MMAP, SHMAT };
1901  virtual Type getType() = 0;
1902
1903  char* base() const { return _range.start(); }
1904  size_t size() const { return _range.size(); }
1905
1906  void setAddrRange(AddrRange range) {
1907    _range = range;
1908  }
1909
1910  bool containsAddress(const char* p) const {
1911    return _range.contains(p);
1912  }
1913
1914  bool containsRange(const char* p, size_t size) const {
1915    return _range.contains(AddrRange((char*)p, size));
1916  }
1917
1918  bool isSameRange(const char* p, size_t size) const {
1919    return _range.is_same_range(AddrRange((char*)p, size));
1920  }
1921
1922  virtual bool disclaim(char* p, size_t size) = 0;
1923  virtual bool release() = 0;
1924
1925  // blocks live in a list.
1926  ShmBkBlock* next() const { return _next; }
1927  void set_next(ShmBkBlock* blk) { _next = blk; }
1928
1929}; // end: ShmBkBlock
1930
1931
1932  // ShmBkMappedBlock: describes a block allocated with mmap()
1933class ShmBkMappedBlock : public ShmBkBlock {
1934public:
1935
1936  ShmBkMappedBlock(AddrRange range)
1937    : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
1938
1939  void print(outputStream* st) const {
1940    ShmBkBlock::print(st);
1941    st->print_cr(" - mmap'ed");
1942  }
1943
1944  Type getType() {
1945    return MMAP;
1946  }
1947
1948  bool disclaim(char* p, size_t size) {
1949
1950    AddrRange r(p, size);
1951
1952    guarantee(_range.contains(r), "invalid disclaim");
1953
1954    // only disclaim whole ranges.
1955    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
1956    if (r2.is_empty()) {
1957      return true;
1958    }
1959
1960    const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
1961
1962    if (rc != 0) {
1963      warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
1964    }
1965
1966    return rc == 0 ? true : false;
1967  }
1968
1969  bool release() {
1970    // mmap'ed blocks are released using munmap
1971    if (::munmap(_range.start(), _range.size()) != 0) {
1972      warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
1973      return false;
1974    }
1975    return true;
1976  }
1977}; // end: ShmBkMappedBlock
1978
1979  // ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
1980class ShmBkShmatedBlock : public ShmBkBlock {
1981public:
1982
1983  ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
1984    : ShmBkBlock(range, pagesize, pinned) {}
1985
1986  void print(outputStream* st) const {
1987    ShmBkBlock::print(st);
1988    st->print_cr(" - shmat'ed");
1989  }
1990
1991  Type getType() {
1992    return SHMAT;
1993  }
1994
1995  bool disclaim(char* p, size_t size) {
1996
1997    AddrRange r(p, size);
1998
1999    if (_pinned) {
2000      return true;
2001    }
2002
2003    // shmat'ed blocks are disclaimed using disclaim64
2004    guarantee(_range.contains(r), "invalid disclaim");
2005
2006    // only disclaim whole ranges.
2007    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
2008    if (r2.is_empty()) {
2009      return true;
2010    }
2011
2012    const bool rc = my_disclaim64(r2.start(), r2.size());
2013
2014    if (Verbose && !rc) {
2015      warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
2016    }
2017
2018    return rc;
2019  }
2020
2021  bool release() {
2022    bool rc = false;
2023    if (::shmdt(_range.start()) != 0) {
2024      warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
2025    } else {
2026      rc = true;
2027    }
2028    return rc;
2029  }
2030
2031}; // end: ShmBkShmatedBlock
2032
2033static ShmBkBlock* g_shmbk_list = NULL;
2034static volatile jint g_shmbk_table_lock = 0;
2035
2036// keep some usage statistics
2037static struct {
2038  int nodes;    // number of nodes in list
2039  size_t bytes; // reserved - not committed - bytes.
2040  int reserves; // how often reserve was called
2041  int lookups;  // how often a lookup was made
2042} g_shmbk_stats = { 0, 0, 0, 0 };
2043
2044// add information about a shared memory segment to the bookkeeping
2045static void shmbk_register(ShmBkBlock* p_block) {
2046  guarantee(p_block, "logic error");
2047  p_block->set_next(g_shmbk_list);
2048  g_shmbk_list = p_block;
2049  g_shmbk_stats.reserves ++;
2050  g_shmbk_stats.bytes += p_block->size();
2051  g_shmbk_stats.nodes ++;
2052}
2053
2054  // remove a shared memory segment's information from the bookkeeping
2055static void shmbk_unregister(ShmBkBlock* p_block) {
2056  ShmBkBlock* p = g_shmbk_list;
2057  ShmBkBlock* prev = NULL;
2058  while (p) {
2059    if (p == p_block) {
2060      if (prev) {
2061        prev->set_next(p->next());
2062      } else {
2063        g_shmbk_list = p->next();
2064      }
2065      g_shmbk_stats.nodes --;
2066      g_shmbk_stats.bytes -= p->size();
2067      return;
2068    }
2069    prev = p;
2070    p = p->next();
2071  }
2072  assert(false, "should not happen");
2073}
2074
2075  // Given a pointer, return the shared memory bookkeeping record for the segment it points into.
2076  // Using the returned block info must happen under lock protection.
2077static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
2078  g_shmbk_stats.lookups ++;
2079  ShmBkBlock* p = g_shmbk_list;
2080  while (p) {
2081    if (p->containsAddress(addr)) {
2082      return p;
2083    }
2084    p = p->next();
2085  }
2086  return NULL;
2087}
2088
2089// dump all information about all memory segments allocated with os::reserve_memory()
2090void shmbk_dump_info() {
2091  tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
2092    "total reserves: %d total lookups: %d)",
2093    g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
2094  const ShmBkBlock* p = g_shmbk_list;
2095  int i = 0;
2096  while (p) {
2097    p->print(tty);
2098    p = p->next();
2099    i ++;
2100  }
2101}
2102
2103#define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
2104#define UNLOCK_SHMBK   }
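      // Usage sketch for the macros above - they open and close a ThreadCritical
      // scope, so typical bookkeeping code looks like:
      //
      //   LOCK_SHMBK
      //     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
      //     ...
      //   UNLOCK_SHMBK
      //
      // Returning from within the scope is safe, because the ThreadCritical is
      // released by its destructor.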
2105
2106// End: shared memory bookkeeping
2107////////////////////////////////////////////////////////////////////////////////////////////////////
2108
2109int os::vm_page_size() {
2110  // Seems redundant as all get out
2111  assert(os::Aix::page_size() != -1, "must call os::init");
2112  return os::Aix::page_size();
2113}
2114
2115// Aix allocates memory by pages.
2116int os::vm_allocation_granularity() {
2117  assert(os::Aix::page_size() != -1, "must call os::init");
2118  return os::Aix::page_size();
2119}
2120
2121int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
2122
2123  // Commit is a noop. There is no explicit commit
2124  // needed on AIX. Memory is committed when touched.
2125  //
2126  // Debug : check address range for validity
2127#ifdef ASSERT
2128  LOCK_SHMBK
2129    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2130    if (!block) {
2131      fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
2132      shmbk_dump_info();
2133      assert(false, "invalid pointer");
2134      return false;
2135    } else if (!block->containsRange(addr, size)) {
2136      fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
2137      shmbk_dump_info();
2138      assert(false, "invalid range");
2139      return false;
2140    }
2141  UNLOCK_SHMBK
2142#endif // ASSERT
2143
2144  return 0;
2145}
2146
2147bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2148  return os::Aix::commit_memory_impl(addr, size, exec) == 0;
2149}
2150
2151void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2152                                  const char* mesg) {
2153  assert(mesg != NULL, "mesg must be specified");
2154  os::Aix::commit_memory_impl(addr, size, exec);
2155}
2156
2157int os::Aix::commit_memory_impl(char* addr, size_t size,
2158                                size_t alignment_hint, bool exec) {
2159  return os::Aix::commit_memory_impl(addr, size, exec);
2160}
2161
2162bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2163                          bool exec) {
2164  return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2165}
2166
2167void os::pd_commit_memory_or_exit(char* addr, size_t size,
2168                                  size_t alignment_hint, bool exec,
2169                                  const char* mesg) {
2170  os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
2171}
2172
2173bool os::pd_uncommit_memory(char* addr, size_t size) {
2174
2175  // Delegate to ShmBkBlock class which knows how to uncommit its memory.
2176
2177  bool rc = false;
2178  LOCK_SHMBK
2179    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2180    if (!block) {
2181      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2182      shmbk_dump_info();
2183      assert(false, "invalid pointer");
2184      return false;
2185    } else if (!block->containsRange(addr, size)) {
2186      fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
2187      shmbk_dump_info();
2188      assert(false, "invalid range");
2189      return false;
2190    }
2191    rc = block->disclaim(addr, size);
2192  UNLOCK_SHMBK
2193
2194  if (Verbose && !rc) {
2195    warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
2196  }
2197  return rc;
2198}
2199
2200bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2201  return os::guard_memory(addr, size);
2202}
2203
2204bool os::remove_stack_guard_pages(char* addr, size_t size) {
2205  return os::unguard_memory(addr, size);
2206}
2207
2208void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2209}
2210
2211void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2212}
2213
2214void os::numa_make_global(char *addr, size_t bytes) {
2215}
2216
2217void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2218}
2219
2220bool os::numa_topology_changed() {
2221  return false;
2222}
2223
2224size_t os::numa_get_groups_num() {
2225  return 1;
2226}
2227
2228int os::numa_get_group_id() {
2229  return 0;
2230}
2231
2232size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2233  if (size > 0) {
2234    ids[0] = 0;
2235    return 1;
2236  }
2237  return 0;
2238}
2239
2240bool os::get_page_info(char *start, page_info* info) {
2241  return false;
2242}
2243
2244char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2245  return end;
2246}
2247
2248// Flags for reserve_shmatted_memory:
2249#define RESSHM_WISHADDR_OR_FAIL                     1
2250#define RESSHM_TRY_16M_PAGES                        2
2251#define RESSHM_16M_PAGES_OR_FAIL                    4
2252
2253// Result of reserve_shmatted_memory:
2254struct shmatted_memory_info_t {
2255  char* addr;
2256  size_t pagesize;
2257  bool pinned;
2258};
2259
2260// Reserve a section of shmatted memory.
2261// params:
2262// bytes [in]: size of memory, in bytes
2263// requested_addr [in]: wish address.
2264//                      NULL = no wish.
2265//                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
2266//                      be obtained, function will fail. Otherwise wish address is treated as hint and
2267//                      another pointer is returned.
2268// flags [in]:          some flags. Valid flags are:
2269//                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
2270//                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
2271//                          (requires UseLargePages and Use16MPages)
2272//                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
2273//                          Otherwise any other page size will do.
2274// p_info [out] :       holds information about the created shared memory segment.
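      //
      // Hypothetical usage - reserve one GB anywhere, preferring, but not insisting
      // on, 16M pages:
      //
      //   shmatted_memory_info_t info;
      //   if (reserve_shmatted_memory(1*G, NULL, RESSHM_TRY_16M_PAGES, &info)) {
      //     // info.addr, info.pagesize and info.pinned describe the segment.
      //   }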
2275static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
2276
2277  assert(p_info, "parameter error");
2278
2279  // init output struct.
2280  p_info->addr = NULL;
2281
2282  // We should also not be here for EXTSHM=ON.
2283  if (os::Aix::extshm()) {
2284    ShouldNotReachHere();
2285  }
2286
2287  // extract flags. sanity checks.
2288  const bool wishaddr_or_fail =
2289    flags & RESSHM_WISHADDR_OR_FAIL;
2290  const bool try_16M_pages =
2291    flags & RESSHM_TRY_16M_PAGES;
2292  const bool f16M_pages_or_fail =
2293    flags & RESSHM_16M_PAGES_OR_FAIL;
2294
2295  // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
2296  // shmat will fail anyway, so save some cycles by failing right away
2297  if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
2298    if (wishaddr_or_fail) {
2299      return false;
2300    } else {
2301      requested_addr = NULL;
2302    }
2303  }
2304
2305  char* addr = NULL;
2306
2307  // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
2308  // pagesize dynamically.
2309  const size_t size = align_size_up(bytes, SIZE_16M);
2310
2311  // reserve the shared segment
2312  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2313  if (shmid == -1) {
2314    warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
2315    return false;
2316  }
2317
2318  // Important note:
2319  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2320  // We must remove it from the system right after attaching it. System V shm segments are global and
2321  // survive the process.
2322  // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
2323
2324  // try forcing the page size
2325  size_t pagesize = -1; // unknown so far
2326
2327  if (UseLargePages) {
2328
2329    struct shmid_ds shmbuf;
2330    memset(&shmbuf, 0, sizeof(shmbuf));
2331
2332    // First, try to take from 16M page pool if...
2333    if (os::Aix::can_use_16M_pages()  // we can ...
2334        && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
2335        && try_16M_pages) {           // caller wants us to.
2336      shmbuf.shm_pagesize = SIZE_16M;
2337      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2338        pagesize = SIZE_16M;
2339      } else {
2340        warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
2341                size / SIZE_16M, errno);
2342        if (f16M_pages_or_fail) {
2343          goto cleanup_shm;
2344        }
2345      }
2346    }
2347
2348    // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
2349    // because the 64K page pool may also be exhausted.
2350    if (pagesize == -1) {
2351      shmbuf.shm_pagesize = SIZE_64K;
2352      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2353        pagesize = SIZE_64K;
2354      } else {
2355        warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
2356                size / SIZE_64K, errno);
2357        // Here we give up and leave pagesize at -1. Later, after attaching, we will query
2358        // the real page size of the attached memory. (In theory, it may be something
2359        // other than 4K if LDR_CNTRL SHM_PSIZE is set.)
2360      }
2361    }
2362  }
2363
2364  // sanity point
2365  assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
2366
2367  // Now attach the shared segment.
2368  addr = (char*) shmat(shmid, requested_addr, 0);
2369  if (addr == (char*)-1) {
2370    // How to handle attach failure:
2371    // If it failed for a specific wish address, tolerate this: in that case, if wish address was
2372    // mandatory, fail, if not, retry anywhere.
2373    // If it failed for any other reason, treat that as fatal error.
2374    addr = NULL;
2375    if (requested_addr) {
2376      if (wishaddr_or_fail) {
2377        goto cleanup_shm;
2378      } else {
2379        addr = (char*) shmat(shmid, NULL, 0);
2380        if (addr == (char*)-1) { // fatal
2381          addr = NULL;
2382          warning("shmat failed (errno: %d)", errno);
2383          goto cleanup_shm;
2384        }
2385      }
2386    } else { // fatal
2387      addr = NULL;
2388      warning("shmat failed (errno: %d)", errno);
2389      goto cleanup_shm;
2390    }
2391  }
2392
2393  // sanity point
2394  assert(addr && addr != (char*) -1, "wrong address");
2395
2396  // after successful Attach remove the segment - right away.
2397  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2398    warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2399    guarantee(false, "failed to remove shared memory segment!");
2400  }
2401  shmid = -1;
2402
2403  // query the real page size. In case setting the page size did not work (see above), the system
2404  // may have given us something other than 4K (LDR_CNTRL).
2405  {
2406    const size_t real_pagesize = os::Aix::query_pagesize(addr);
2407    if (pagesize != -1) {
2408      assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
2409    } else {
2410      pagesize = real_pagesize;
2411    }
2412  }
2413
2414  // Now register the reserved block with internal book keeping.
2415  LOCK_SHMBK
2416    const bool pinned = pagesize >= SIZE_16M ? true : false;
2417    ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
2418    assert(p_block, "");
2419    shmbk_register(p_block);
2420  UNLOCK_SHMBK
2421
2422cleanup_shm:
2423
2424  // if we have not done so yet, remove the shared memory segment. This is very important.
2425  if (shmid != -1) {
2426    if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2427      warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2428      guarantee(false, "failed to remove shared memory segment!");
2429    }
2430    shmid = -1;
2431  }
2432
2433  // trace
2434  if (Verbose && !addr) {
2435    if (requested_addr != NULL) {
2436      warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
2437    } else {
2438      warning("failed to shm-allocate 0x%llX bytes at any address.", size);
2439    }
2440  }
2441
2442  // hand info to caller
2443  if (addr) {
2444    p_info->addr = addr;
2445    p_info->pagesize = pagesize;
2446    p_info->pinned = pagesize == SIZE_16M ? true : false;
2447  }
2448
2449  // sanity test:
2450  if (requested_addr && addr && wishaddr_or_fail) {
2451    guarantee(addr == requested_addr, "shmat error");
2452  }
2453
2454  // just one more test to really make sure we have no dangling shm segments.
2455  guarantee(shmid == -1, "dangling shm segments");
2456
2457  return addr ? true : false;
2458
2459} // end: reserve_shmatted_memory
2460
2461  // Reserve memory using mmap. Wish address handling behaves the same as in
2462  // reserve_shmatted_memory(); returns NULL in case of an error.
2463static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
2464
2465  // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
2466  if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
2467    warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
2468    return NULL;
2469  }
2470
2471  const size_t size = align_size_up(bytes, SIZE_4K);
2472
2473  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2474  // msync(MS_INVALIDATE) (see os::uncommit_memory)
2475  int flags = MAP_ANONYMOUS | MAP_SHARED;
2476
2477  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2478  // it means if a wish address is given but MAP_FIXED is not set.
2479  //
2480  // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
2481  // clobbers the address range, which is probably not what the caller wants. That's
2482  // why I assert here (again) that the SPEC1170 compat mode is off.
2483  // If we want to be able to run under SPEC1170, we have to do some porting and
2484  // testing.
2485  if (requested_addr != NULL) {
2486    assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
2487    flags |= MAP_FIXED;
2488  }
2489
2490  char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2491
2492  if (addr == MAP_FAILED) {
2493    // attach failed: tolerate for specific wish addresses. Not being able to attach
2494    // anywhere is a fatal error.
2495    if (requested_addr == NULL) {
2496      // It's ok to fail here if the machine does not have enough memory.
2497      warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
2498    }
2499    addr = NULL;
2500    goto cleanup_mmap;
2501  }
2502
2503  // If we requested a specific address, verify that mmap honored it (MAP_FIXED enforces this).
2504  if (addr && requested_addr) {
2505    guarantee(addr == requested_addr, "unexpected");
2506  }
2507
2508  // register this mmap'ed segment with book keeping
2509  LOCK_SHMBK
2510    ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
2511    assert(p_block, "");
2512    shmbk_register(p_block);
2513  UNLOCK_SHMBK
2514
2515cleanup_mmap:
2516
2517  // trace
2518  if (Verbose) {
2519    if (addr) {
2520      fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
2521    }
2522    else {
2523      if (requested_addr != NULL) {
2524        warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
2525      } else {
2526        warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
2527      }
2528    }
2529  }
2530
2531  return addr;
2532
2533} // end: reserve_mmaped_memory
2534
2535// Reserves and attaches a shared memory segment.
2536// Will assert if a wish address is given and could not be obtained.
2537char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2538  return os::attempt_reserve_memory_at(bytes, requested_addr);
2539}
2540
2541bool os::pd_release_memory(char* addr, size_t size) {
2542
2543  // delegate to ShmBkBlock class which knows how to uncommit its memory.
2544
2545  bool rc = false;
2546  LOCK_SHMBK
2547    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2548    if (!block) {
2549      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2550      shmbk_dump_info();
2551      assert(false, "invalid pointer");
2552      return false;
2553    }
2554    else if (!block->isSameRange(addr, size)) {
2555      if (block->getType() == ShmBkBlock::MMAP) {
2556        // Release only the same range, or a part at the beginning or the end of a range.
2557        if (block->base() == addr && size < block->size()) {
2558          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
2559          assert(b, "");
2560          shmbk_register(b);
2561          block->setAddrRange(AddrRange(addr, size));
2562        }
2563        else if (addr > block->base() && addr + size == block->base() + block->size()) {
2564          ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
2565          assert(b, "");
2566          shmbk_register(b);
2567          block->setAddrRange(AddrRange(addr, size));
2568        }
2569        else {
2570          fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
2571          shmbk_dump_info();
2572          assert(false, "invalid mmap range");
2573          return false;
2574        }
2575      }
2576      else {
2577        // Release only the same range. No partial release allowed.
2578        // Soften the requirement a bit, because the caller may think it owns a smaller
2579        // size than the block actually has, due to alignment etc.
2580        if (block->base() != addr || block->size() < size) {
2581          fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
2582          shmbk_dump_info();
2583          assert(false, "invalid shmget range");
2584          return false;
2585        }
2586      }
2587    }
2588    rc = block->release();
2589    assert(rc, "release failed");
2590    // remove block from bookkeeping
2591    shmbk_unregister(block);
2592    delete block;
2593  UNLOCK_SHMBK
2594
2595  if (!rc) {
2596    warning("failed to released %lu bytes at 0x%p", size, addr);
2597  }
2598
2599  return rc;
2600}
2601
2602static bool checked_mprotect(char* addr, size_t size, int prot) {
2603
2604  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2605  // not tell me if protection failed when trying to protect an un-protectable range.
2606  //
2607  // This means if the memory was allocated using shmget/shmat, protection won't work
2608  // but mprotect will still return 0:
2609  //
2610  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2611
2612  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
2613
2614  if (!rc) {
2615    const char* const s_errno = strerror(errno);
2616    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2617    return false;
2618  }
2619
2620  // mprotect success check
2621  //
2622  // Mprotect said it changed the protection but can I believe it?
2623  //
2624  // To be sure I need to check the protection afterwards. Try to
2625  // read from protected memory and check whether that causes a segfault.
2626  //
2627  if (!os::Aix::xpg_sus_mode()) {
2628
2629    if (StubRoutines::SafeFetch32_stub()) {
2630
2631      const bool read_protected =
2632        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2633         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
2634
2635      if (prot & PROT_READ) {
2636        rc = !read_protected;
2637      } else {
2638        rc = read_protected;
2639      }
2640    }
2641  }
2642  if (!rc) {
2643    assert(false, "mprotect failed.");
2644  }
2645  return rc;
2646}
2647
2648// Set protections specified
2649bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2650  unsigned int p = 0;
2651  switch (prot) {
2652  case MEM_PROT_NONE: p = PROT_NONE; break;
2653  case MEM_PROT_READ: p = PROT_READ; break;
2654  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
2655  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2656  default:
2657    ShouldNotReachHere();
2658  }
2659  // is_committed is unused.
2660  return checked_mprotect(addr, size, p);
2661}
2662
2663bool os::guard_memory(char* addr, size_t size) {
2664  return checked_mprotect(addr, size, PROT_NONE);
2665}
2666
2667bool os::unguard_memory(char* addr, size_t size) {
2668  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2669}
2670
2671// Large page support
2672
2673static size_t _large_page_size = 0;
2674
2675// Enable large page support if OS allows that.
2676void os::large_page_init() {
2677
2678  // Note: os::Aix::query_multipage_support must run first.
2679
2680  if (!UseLargePages) {
2681    return;
2682  }
2683
2684  if (!Aix::can_use_64K_pages()) {
2685    assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
2686    UseLargePages = false;
2687    return;
2688  }
2689
2690  if (!Aix::can_use_16M_pages() && Use16MPages) {
2691    fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
2692            " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
2693  }
2694
2695  // Do not report 16M page alignment as part of os::_page_sizes if we are
2696  // explicitly forbidden from using 16M pages. Doing so would increase the
2697  // alignment the garbage collector calculates with, slightly increasing
2698  // heap usage. We should only pay for 16M alignment if we really want to
2699  // use 16M pages.
2700  if (Use16MPages && Aix::can_use_16M_pages()) {
2701    _large_page_size = SIZE_16M;
2702    _page_sizes[0] = SIZE_16M;
2703    _page_sizes[1] = SIZE_64K;
2704    _page_sizes[2] = SIZE_4K;
2705    _page_sizes[3] = 0;
2706  } else if (Aix::can_use_64K_pages()) {
2707    _large_page_size = SIZE_64K;
2708    _page_sizes[0] = SIZE_64K;
2709    _page_sizes[1] = SIZE_4K;
2710    _page_sizes[2] = 0;
2711  }
2712
2713  if (Verbose) {
2714    ("Default large page size is 0x%llX.", _large_page_size);
2715  }
2716} // end: os::large_page_init()
2717
2718char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2719  // "exec" is passed in but not used. Creating the shared image for
2720  // the code cache doesn't have an SHM_X executable permission to check.
2721  Unimplemented();
2722  return 0;
2723}
2724
2725bool os::release_memory_special(char* base, size_t bytes) {
2726  // detaching the SHM segment will also delete it, see reserve_memory_special()
2727  Unimplemented();
2728  return false;
2729}
2730
2731size_t os::large_page_size() {
2732  return _large_page_size;
2733}
2734
2735bool os::can_commit_large_page_memory() {
2736  // Well, sadly we cannot commit anything at all (see comment in
2737  // os::commit_memory) but we claim to so we can make use of large pages
2738  return true;
2739}
2740
2741bool os::can_execute_large_page_memory() {
2742  // We can do that
2743  return true;
2744}
2745
2746// Reserve memory at an arbitrary address, only if that area is
2747// available (and not reserved for something else).
2748char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2749
2750  bool use_mmap = false;
2751
2752  // mmap: smaller graining, no large page support
2753  // shm: large graining (256M), large page support, limited number of shm segments
2754  //
2755  // Prefer mmap wherever we either do not need large page support or have OS limits
2756
2757  if (!UseLargePages || bytes < SIZE_16M) {
2758    use_mmap = true;
2759  }
2760
2761  char* addr = NULL;
2762  if (use_mmap) {
2763    addr = reserve_mmaped_memory(bytes, requested_addr);
2764  } else {
2765    // shmat: wish address is mandatory, and do not try 16M pages here.
2766    shmatted_memory_info_t info;
2767    const int flags = RESSHM_WISHADDR_OR_FAIL;
2768    if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
2769      addr = info.addr;
2770    }
2771  }
2772
2773  return addr;
2774}
2775
2776size_t os::read(int fd, void *buf, unsigned int nBytes) {
2777  return ::read(fd, buf, nBytes);
2778}
2779
2780void os::naked_short_sleep(jlong ms) {
2781  struct timespec req;
2782
2783  assert(ms < 1000, "Un-interruptible sleep, short time use only");
2784  req.tv_sec = 0;
2785  if (ms > 0) {
2786    req.tv_nsec = (ms % 1000) * 1000000;
2787  }
2788  else {
2789    req.tv_nsec = 1;
2790  }
2791
2792  nanosleep(&req, NULL);
2793
2794  return;
2795}
2796
2797// Sleep forever; naked call to OS-specific sleep; use with CAUTION
2798void os::infinite_sleep() {
2799  while (true) {    // sleep forever ...
2800    ::sleep(100);   // ... 100 seconds at a time
2801  }
2802}
2803
2804// Used to convert frequent JVM_Yield() to nops
2805bool os::dont_yield() {
2806  return DontYieldALot;
2807}
2808
2809void os::yield() {
2810  sched_yield();
2811}
2812
2813os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
2814
2815////////////////////////////////////////////////////////////////////////////////
2816// thread priority support
2817
2818// From AIX manpage to pthread_setschedparam
2819// (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2820//    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2821//
2822// "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2823// range from 40 to 80, where 40 is the least favored priority and 80
2824// is the most favored."
2825//
2826// (Actually, I doubt this even has an impact on AIX, as we do kernel
2827// scheduling there; however, this still leaves iSeries.)
2828//
2829// We use the same values for AIX and PASE.
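      //
      // Note that we deliberately use only a narrow band (54..60) in the middle
      // of that documented 40..80 range.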
2830int os::java_to_os_priority[CriticalPriority + 1] = {
2831  54,             // 0 Entry should never be used
2832
2833  55,             // 1 MinPriority
2834  55,             // 2
2835  56,             // 3
2836
2837  56,             // 4
2838  57,             // 5 NormPriority
2839  57,             // 6
2840
2841  58,             // 7
2842  58,             // 8
2843  59,             // 9 NearMaxPriority
2844
2845  60,             // 10 MaxPriority
2846
2847  60              // 11 CriticalPriority
2848};
2849
2850OSReturn os::set_native_priority(Thread* thread, int newpri) {
2851  if (!UseThreadPriorities) return OS_OK;
2852  pthread_t thr = thread->osthread()->pthread_id();
2853  int policy = SCHED_OTHER;
2854  struct sched_param param;
2855  param.sched_priority = newpri;
2856  int ret = pthread_setschedparam(thr, policy, &param);
2857
2858  if (Verbose) {
2859    if (ret == 0) {
2860      fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
2861    } else {
2862      fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
2863              (int)thr, newpri, ret, strerror(ret));
2864    }
2865  }
2866  return (ret == 0) ? OS_OK : OS_ERR;
2867}
2868
2869OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2870  if (!UseThreadPriorities) {
2871    *priority_ptr = java_to_os_priority[NormPriority];
2872    return OS_OK;
2873  }
2874  pthread_t thr = thread->osthread()->pthread_id();
2875  int policy = SCHED_OTHER;
2876  struct sched_param param;
2877  int ret = pthread_getschedparam(thr, &policy, &param);
2878  *priority_ptr = param.sched_priority;
2879
2880  return (ret == 0) ? OS_OK : OS_ERR;
2881}
2882
2883// Hint to the underlying OS that a task switch would not be good.
2884// Void return because it's a hint and can fail.
2885void os::hint_no_preempt() {}
2886
2887////////////////////////////////////////////////////////////////////////////////
2888// suspend/resume support
2889
2890//  the low-level signal-based suspend/resume support is a remnant from the
2891//  old VM-suspension that used to be for java-suspension, safepoints etc,
2892//  within hotspot. Now there is a single use-case for this:
2893//    - calling get_thread_pc() on the VMThread by the flat-profiler task
2894//      that runs in the watcher thread.
2895//  The remaining code is greatly simplified from the more general suspension
2896//  code that used to be used.
2897//
2898//  The protocol is quite simple:
2899//  - suspend:
2900//      - sends a signal to the target thread
2901//      - polls the suspend state of the osthread using a yield loop
2902//      - target thread signal handler (SR_handler) sets suspend state
2903//        and blocks in sigsuspend until continued
2904//  - resume:
2905//      - sets target osthread state to continue
2906//      - sends signal to end the sigsuspend loop in the SR_handler
2907//
2908//  Note that the SR_lock plays no role in this suspend/resume protocol.
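      //
      //  In the code below, do_suspend() and do_resume() implement the initiating
      //  side of this protocol, while SR_handler() runs in the target thread.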
2909//
2910
2911static void resume_clear_context(OSThread *osthread) {
2912  osthread->set_ucontext(NULL);
2913  osthread->set_siginfo(NULL);
2914}
2915
2916static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
2917  osthread->set_ucontext(context);
2918  osthread->set_siginfo(siginfo);
2919}
2920
2921//
2922// Handler function invoked when a thread's execution is suspended or
2923// resumed. We have to be careful that only async-safe functions are
2924// called here (Note: most pthread functions are not async safe and
2925// should be avoided.)
2926//
2927// Note: sigwait() is a more natural fit than sigsuspend() from an
2928  // interface point of view, but sigwait() prevents the signal handler
2929// from being run. libpthread would get very confused by not having
2930// its signal handlers run and prevents sigwait()'s use with the
2931  // mutex granting signal.
2932//
2933// Currently only ever called on the VMThread and JavaThreads (PC sampling).
2934//
2935static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
2936  // Save and restore errno to avoid confusing native code with EINTR
2937  // after sigsuspend.
2938  int old_errno = errno;
2939
2940  Thread* thread = Thread::current();
2941  OSThread* osthread = thread->osthread();
2942  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
2943
2944  os::SuspendResume::State current = osthread->sr.state();
2945  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
2946    suspend_save_context(osthread, siginfo, context);
2947
2948    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
2949    os::SuspendResume::State state = osthread->sr.suspended();
2950    if (state == os::SuspendResume::SR_SUSPENDED) {
2951      sigset_t suspend_set;  // signals for sigsuspend()
2952
2953      // get current set of blocked signals and unblock resume signal
2954      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
2955      sigdelset(&suspend_set, SR_signum);
2956
2957      // wait here until we are resumed
2958      while (1) {
2959        sigsuspend(&suspend_set);
2960
2961        os::SuspendResume::State result = osthread->sr.running();
2962        if (result == os::SuspendResume::SR_RUNNING) {
2963          break;
2964        }
2965      }
2966
2967    } else if (state == os::SuspendResume::SR_RUNNING) {
2968      // request was cancelled, continue
2969    } else {
2970      ShouldNotReachHere();
2971    }
2972
2973    resume_clear_context(osthread);
2974  } else if (current == os::SuspendResume::SR_RUNNING) {
2975    // request was cancelled, continue
2976  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
2977    // ignore
2978  } else {
2979    ShouldNotReachHere();
2980  }
2981
2982  errno = old_errno;
2983}
2984
2985
2986static int SR_initialize() {
2987  struct sigaction act;
2988  char *s;
2989  // Get signal number to use for suspend/resume
2990  if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
2991    int sig = ::strtol(s, 0, 10);
2992    if (sig > 0 && sig < NSIG) {
2993      SR_signum = sig;
2994    }
2995  }
2996
2997  assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
2998        "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
2999
3000  sigemptyset(&SR_sigset);
3001  sigaddset(&SR_sigset, SR_signum);
3002
3003  // Set up signal handler for suspend/resume.
3004  act.sa_flags = SA_RESTART|SA_SIGINFO;
3005  act.sa_handler = (void (*)(int)) SR_handler;
3006
3007  // SR_signum is blocked by default.
3008  // 4528190 - We also need to block pthread restart signal (32 on all
3009  // supported Linux platforms). Note that LinuxThreads need to block
3010  // this signal for all threads to work properly. So we don't have
3011  // to use hard-coded signal number when setting up the mask.
3012  pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
3013
3014  if (sigaction(SR_signum, &act, 0) == -1) {
3015    return -1;
3016  }
3017
3018  // Save signal flag
3019  os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
3020  return 0;
3021}
3022
3023static int SR_finalize() {
3024  return 0;
3025}
3026
3027static int sr_notify(OSThread* osthread) {
3028  int status = pthread_kill(osthread->pthread_id(), SR_signum);
3029  assert_status(status == 0, status, "pthread_kill");
3030  return status;
3031}
3032
3033// "Randomly" selected value for how long we want to spin
3034// before bailing out on suspending a thread, also how often
3035// we send a signal to a thread we want to resume
3036static const int RANDOMLY_LARGE_INTEGER = 1000000;
3037static const int RANDOMLY_LARGE_INTEGER2 = 100;
3038
3039// returns true on success and false on error - really an error is fatal
3040// but this seems the normal response to library errors
3041static bool do_suspend(OSThread* osthread) {
3042  assert(osthread->sr.is_running(), "thread should be running");
3043  // mark as suspended and send signal
3044
3045  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3046    // failed to switch, state wasn't running?
3047    ShouldNotReachHere();
3048    return false;
3049  }
3050
3051  if (sr_notify(osthread) != 0) {
3052    // try to cancel, switch to running
3053
3054    os::SuspendResume::State result = osthread->sr.cancel_suspend();
3055    if (result == os::SuspendResume::SR_RUNNING) {
3056      // cancelled
3057      return false;
3058    } else if (result == os::SuspendResume::SR_SUSPENDED) {
3059      // somehow managed to suspend
3060      return true;
3061    } else {
3062      ShouldNotReachHere();
3063      return false;
3064    }
3065  }
3066
3067  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3068
3069  for (int n = 0; !osthread->sr.is_suspended(); n++) {
3070    for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
3071      os::yield();
3072    }
3073
3074    // timeout, try to cancel the request
3075    if (n >= RANDOMLY_LARGE_INTEGER) {
3076      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3077      if (cancelled == os::SuspendResume::SR_RUNNING) {
3078        return false;
3079      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3080        return true;
3081      } else {
3082        ShouldNotReachHere();
3083        return false;
3084      }
3085    }
3086  }
3087
3088  guarantee(osthread->sr.is_suspended(), "Must be suspended");
3089  return true;
3090}
3091
3092static void do_resume(OSThread* osthread) {
3093  //assert(osthread->sr.is_suspended(), "thread should be suspended");
3094
3095  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3096    // failed to switch to WAKEUP_REQUEST
3097    ShouldNotReachHere();
3098    return;
3099  }
3100
3101  while (!osthread->sr.is_running()) {
3102    if (sr_notify(osthread) == 0) {
3103      for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
3104        for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
3105          os::yield();
3106        }
3107      }
3108    } else {
3109      ShouldNotReachHere();
3110    }
3111  }
3112
3113  guarantee(osthread->sr.is_running(), "Must be running!");
3114}
3115
3116///////////////////////////////////////////////////////////////////////////////////
3117// signal handling (except suspend/resume)
3118
3119// This routine may be used by user applications as a "hook" to catch signals.
3120// The user-defined signal handler must pass unrecognized signals to this
3121// routine, and if it returns true (non-zero), then the signal handler must
3122// return immediately. If the flag "abort_if_unrecognized" is true, then this
3123  // routine will never return false (zero), but instead will execute a VM panic
3124  // routine to kill the process.
3125//
3126// If this routine returns false, it is OK to call it again. This allows
3127// the user-defined signal handler to perform checks either before or after
3128// the VM performs its own checks. Naturally, the user code would be making
3129// a serious error if it tried to handle an exception (such as a null check
3130// or breakpoint) that the VM was generating for its own correct operation.
3131//
3132// This routine may recognize any of the following kinds of signals:
3133//   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
3134// It should be consulted by handlers for any of those signals.
3135//
3136// The caller of this routine must pass in the three arguments supplied
3137// to the function referred to in the "sa_sigaction" (not the "sa_handler")
3138// field of the structure passed to sigaction(). This routine assumes that
3139// the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3140//
3141// Note that the VM will print warnings if it detects conflicting signal
3142// handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3143//
3144extern "C" JNIEXPORT int
3145JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
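
      // A user-defined handler that chains into the VM could look roughly like
      // this (sketch only, not part of this file):
      //
      //   static void my_handler(int sig, siginfo_t* info, void* uc) {
      //     if (JVM_handle_aix_signal(sig, info, uc, 0 /* !abort_if_unrecognized */)) {
      //       return; // the VM recognized and handled the signal
      //     }
      //     // ... application-specific handling ...
      //   }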
3146
3147// Set thread signal mask (for some reason on AIX sigthreadmask() seems
3148// to be the thing to call; documentation is not terribly clear about whether
3149  // pthread_sigmask also works, and if it does, whether it does the same.)
3150bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
3151  const int rc = ::pthread_sigmask(how, set, oset);
3152  // return value semantics differ slightly for error case:
3153  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
3154  // (so, pthread_sigmask is more thread-safe for error handling)
3155  // But success is always 0.
3156  return rc == 0 ? true : false;
3157}
3158
3159// Function to unblock all signals which are, according
3160// to POSIX, typical program error signals. If they happen while being blocked,
3161// they typically will bring down the process immediately.
3162bool unblock_program_error_signals() {
3163  sigset_t set;
3164  ::sigemptyset(&set);
3165  ::sigaddset(&set, SIGILL);
3166  ::sigaddset(&set, SIGBUS);
3167  ::sigaddset(&set, SIGFPE);
3168  ::sigaddset(&set, SIGSEGV);
3169  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3170}
3171
3172// Renamed from 'signalHandler' to avoid collision with other shared libs.
3173void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
3174  assert(info != NULL && uc != NULL, "it must be old kernel");
3175
3176  // Never leave program error signals blocked;
3177  // on all our platforms they would bring down the process immediately when
3178  // getting raised while being blocked.
3179  unblock_program_error_signals();
3180
3181  JVM_handle_aix_signal(sig, info, uc, true);
3182}
3183
3184
3185// This boolean allows users to forward their own non-matching signals
3186// to JVM_handle_aix_signal, harmlessly.
3187bool os::Aix::signal_handlers_are_installed = false;
3188
3189// For signal-chaining
3190struct sigaction os::Aix::sigact[MAXSIGNUM];
3191unsigned int os::Aix::sigs = 0;
3192bool os::Aix::libjsig_is_loaded = false;
3193typedef struct sigaction *(*get_signal_t)(int);
3194get_signal_t os::Aix::get_signal_action = NULL;
3195
3196struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3197  struct sigaction *actp = NULL;
3198
3199  if (libjsig_is_loaded) {
3200    // Retrieve the old signal handler from libjsig
3201    actp = (*get_signal_action)(sig);
3202  }
3203  if (actp == NULL) {
3204    // Retrieve the preinstalled signal handler from jvm
3205    actp = get_preinstalled_handler(sig);
3206  }
3207
3208  return actp;
3209}
3210
3211static bool call_chained_handler(struct sigaction *actp, int sig,
3212                                 siginfo_t *siginfo, void *context) {
3213  // Call the old signal handler
3214  if (actp->sa_handler == SIG_DFL) {
3215    // It's more reasonable to let jvm treat it as an unexpected exception
3216    // instead of taking the default action.
3217    return false;
3218  } else if (actp->sa_handler != SIG_IGN) {
3219    if ((actp->sa_flags & SA_NODEFER) == 0) {
3220      // automatically block the signal
3221      sigaddset(&(actp->sa_mask), sig);
3222    }
3223
3224    sa_handler_t hand = NULL;
3225    sa_sigaction_t sa = NULL;
3226    bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3227    // retrieve the chained handler
3228    if (siginfo_flag_set) {
3229      sa = actp->sa_sigaction;
3230    } else {
3231      hand = actp->sa_handler;
3232    }
3233
3234    if ((actp->sa_flags & SA_RESETHAND) != 0) {
3235      actp->sa_handler = SIG_DFL;
3236    }
3237
3238    // try to honor the signal mask
3239    sigset_t oset;
3240    pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3241
3242    // call into the chained handler
3243    if (siginfo_flag_set) {
3244      (*sa)(sig, siginfo, context);
3245    } else {
3246      (*hand)(sig);
3247    }
3248
3249    // restore the signal mask
3250    pthread_sigmask(SIG_SETMASK, &oset, 0);
3251  }
3252  // Tell jvm's signal handler the signal is taken care of.
3253  return true;
3254}
3255
3256bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3257  bool chained = false;
3258  // signal-chaining
3259  if (UseSignalChaining) {
3260    struct sigaction *actp = get_chained_signal_action(sig);
3261    if (actp != NULL) {
3262      chained = call_chained_handler(actp, sig, siginfo, context);
3263    }
3264  }
3265  return chained;
3266}
3267
3268struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3269  if ((((unsigned int)1 << sig) & sigs) != 0) {
3270    return &sigact[sig];
3271  }
3272  return NULL;
3273}
3274
3275void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3276  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3277  sigact[sig] = oldAct;
3278  sigs |= (unsigned int)1 << sig;
3279}
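
// Illustrative example of the bookkeeping above (a sketch, not VM code):
//
//   struct sigaction old;
//   ::sigaction(SIGSEGV, NULL, &old);                 // query current handler
//   os::Aix::save_preinstalled_handler(SIGSEGV, old);
//   // Now (sigs & (1U << SIGSEGV)) != 0, and
//   // get_preinstalled_handler(SIGSEGV) returns &sigact[SIGSEGV];
//   // for any signal whose bit is clear it returns NULL.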
3280
3281// for diagnostic
3282int os::Aix::sigflags[MAXSIGNUM];
3283
3284int os::Aix::get_our_sigflags(int sig) {
3285  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3286  return sigflags[sig];
3287}
3288
3289void os::Aix::set_our_sigflags(int sig, int flags) {
3290  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3291  sigflags[sig] = flags;
3292}
3293
3294void os::Aix::set_signal_handler(int sig, bool set_installed) {
3295  // Check for overwrite.
3296  struct sigaction oldAct;
3297  sigaction(sig, (struct sigaction*)NULL, &oldAct);
3298
3299  void* oldhand = oldAct.sa_sigaction
3300    ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3301    : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3302  // Renamed 'signalHandler' to avoid collision with other shared libs.
3303  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3304      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3305      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3306    if (AllowUserSignalHandlers || !set_installed) {
3307      // Do not overwrite; user takes responsibility to forward to us.
3308      return;
3309    } else if (UseSignalChaining) {
3310      // save the old handler in jvm
3311      save_preinstalled_handler(sig, oldAct);
3312      // libjsig also interposes the sigaction() call below and saves the
3313      // old sigaction on its own.
3314    } else {
3315      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3316                    "%#lx for signal %d.", (long)oldhand, sig));
3317    }
3318  }
3319
3320  struct sigaction sigAct;
3321  sigfillset(&(sigAct.sa_mask));
3322  if (!set_installed) {
3323    sigAct.sa_handler = SIG_DFL;
3324    sigAct.sa_flags = SA_RESTART;
3325  } else {
3326    // Renamed 'signalHandler' to avoid collision with other shared libs.
3327    sigAct.sa_sigaction = javaSignalHandler;
3328    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3329  }
3330  // Save the flags we set ourselves.
3331  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3332  sigflags[sig] = sigAct.sa_flags;
3333
3334  int ret = sigaction(sig, &sigAct, &oldAct);
3335  assert(ret == 0, "check");
3336
3337  void* oldhand2 = oldAct.sa_sigaction
3338                 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3339                 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3340  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3341}
3342
3343// install signal handlers for signals that HotSpot needs to
3344// handle in order to support Java-level exception handling.
3345void os::Aix::install_signal_handlers() {
3346  if (!signal_handlers_are_installed) {
3347    signal_handlers_are_installed = true;
3348
3349    // signal-chaining
3350    typedef void (*signal_setting_t)();
3351    signal_setting_t begin_signal_setting = NULL;
3352    signal_setting_t end_signal_setting = NULL;
3353    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3354                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3355    if (begin_signal_setting != NULL) {
3356      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3357                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3358      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3359                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3360      libjsig_is_loaded = true;
3361      assert(UseSignalChaining, "should enable signal-chaining");
3362    }
3363    if (libjsig_is_loaded) {
3364      // Tell libjsig jvm is setting signal handlers
3365      (*begin_signal_setting)();
3366    }
3367
3368    set_signal_handler(SIGSEGV, true);
3369    set_signal_handler(SIGPIPE, true);
3370    set_signal_handler(SIGBUS, true);
3371    set_signal_handler(SIGILL, true);
3372    set_signal_handler(SIGFPE, true);
3373    set_signal_handler(SIGTRAP, true);
3374    set_signal_handler(SIGXFSZ, true);
3375    set_signal_handler(SIGDANGER, true);
3376
3377    if (libjsig_is_loaded) {
3378      // Tell libjsig jvm finishes setting signal handlers
3379      (*end_signal_setting)();
3380    }
3381
3382    // We don't activate the signal checker if libjsig is in place; we trust ourselves,
3383    // and if user signal handlers are allowed (AllowUserSignalHandlers) all bets are off.
3384    // Log that signal checking is off only if -verbose:jni is specified.
3385    if (CheckJNICalls) {
3386      if (libjsig_is_loaded) {
3387        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3388        check_signals = false;
3389      }
3390      if (AllowUserSignalHandlers) {
3391        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3392        check_signals = false;
3393      }
3394      // need to initialize check_signal_done
3395      ::sigemptyset(&check_signal_done);
3396    }
3397  }
3398}
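
// Note on enabling chaining: the JVM_begin_signal_setting/JVM_end_signal_setting/
// JVM_get_signal_action hooks resolved above are exported by libjsig, which the
// user preloads into the process. An illustrative invocation (the exact library
// paths and the AIX preload variable are assumptions; consult the platform docs):
//
//   LDR_PRELOAD64=$JAVA_HOME/jre/lib/ppc64/libjsig.so java MyApp   # AIX
//   LD_PRELOAD=$JAVA_HOME/jre/lib/amd64/libjsig.so java MyApp      # Linux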
3399
3400static const char* get_signal_handler_name(address handler,
3401                                           char* buf, int buflen) {
3402  int offset;
3403  bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3404  if (found) {
3405    // skip directory names
3406    const char *p1, *p2;
3407    p1 = buf;
3408    size_t len = strlen(os::file_separator());
3409    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3410    // The way os::dll_address_to_library_name is implemented on AIX
3411    // right now, it always returns -1 for the offset, which is not
3412    // terribly informative.
3413    // Will fix that. For now, omit the offset.
3414    jio_snprintf(buf, buflen, "%s", p1);
3415  } else {
3416    jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3417  }
3418  return buf;
3419}
3420
3421static void print_signal_handler(outputStream* st, int sig,
3422                                 char* buf, size_t buflen) {
3423  struct sigaction sa;
3424  sigaction(sig, NULL, &sa);
3425
3426  st->print("%s: ", os::exception_name(sig, buf, buflen));
3427
3428  address handler = (sa.sa_flags & SA_SIGINFO)
3429    ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3430    : CAST_FROM_FN_PTR(address, sa.sa_handler);
3431
3432  if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3433    st->print("SIG_DFL");
3434  } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3435    st->print("SIG_IGN");
3436  } else {
3437    st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3438  }
3439
3440  // Print readable mask.
3441  st->print(", sa_mask[0]=");
3442  os::Posix::print_signal_set_short(st, &sa.sa_mask);
3443
3444  address rh = VMError::get_resetted_sighandler(sig);
3445  // Maybe the handler was reset by VMError?
3446  if (rh != NULL) {
3447    handler = rh;
3448    sa.sa_flags = VMError::get_resetted_sigflags(sig);
3449  }
3450
3451  // Print textual representation of sa_flags.
3452  st->print(", sa_flags=");
3453  os::Posix::print_sa_flags(st, sa.sa_flags);
3454
3455  // Check: is it our handler?
3456  if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3457      handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3458    // It is our signal handler.
3459    // Check for flags, reset system-used one!
3460    if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3461      st->print(", flags was changed from " PTR32_FORMAT ", consider using jsig library",
3462                os::Aix::get_our_sigflags(sig));
3463    }
3464  }
3465  st->cr();
3466}
3467
3468
3469#define DO_SIGNAL_CHECK(sig) \
3470  if (!sigismember(&check_signal_done, sig)) \
3471    os::Aix::check_signal_handler(sig)
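
// Example expansion: DO_SIGNAL_CHECK(SIGSEGV) becomes
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV);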
3472
3473// This method is a periodic task to check for misbehaving JNI applications
3474// under CheckJNI; we can add any other periodic checks here.
3475
3476void os::run_periodic_checks() {
3477
3478  if (check_signals == false) return;
3479
3480  // SEGV and BUS, if overridden, could potentially prevent
3481  // generation of hs*.log in the event of a crash; debugging
3482  // such a case can be very challenging, so we absolutely
3483  // check the following for good measure:
3484  DO_SIGNAL_CHECK(SIGSEGV);
3485  DO_SIGNAL_CHECK(SIGILL);
3486  DO_SIGNAL_CHECK(SIGFPE);
3487  DO_SIGNAL_CHECK(SIGBUS);
3488  DO_SIGNAL_CHECK(SIGPIPE);
3489  DO_SIGNAL_CHECK(SIGXFSZ);
3490  if (UseSIGTRAP) {
3491    DO_SIGNAL_CHECK(SIGTRAP);
3492  }
3493  DO_SIGNAL_CHECK(SIGDANGER);
3494
3495  // ReduceSignalUsage allows the user to override these handlers
3496  // see comments at the very top and jvm_solaris.h
3497  if (!ReduceSignalUsage) {
3498    DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3499    DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3500    DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3501    DO_SIGNAL_CHECK(BREAK_SIGNAL);
3502  }
3503
3504  DO_SIGNAL_CHECK(SR_signum);
3505  DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3506}
3507
3508typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3509
3510static os_sigaction_t os_sigaction = NULL;
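
// Background (illustrative sketch, not VM code): an interposition library such
// as libjsig wraps sigaction() roughly like this, which is why
// check_signal_handler() below resolves the real libc symbol via dlsym()
// instead of calling sigaction() directly. remember_user_handler() is a
// hypothetical bookkeeping helper.
//
//   extern "C" int sigaction(int sig, const struct sigaction* act,
//                            struct sigaction* oact) {
//     remember_user_handler(sig, act);
//     static os_sigaction_t real_sigaction =
//       (os_sigaction_t)dlsym(RTLD_NEXT, "sigaction");
//     return real_sigaction(sig, act, oact);  // forward to the real libc
//   }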
3511
3512void os::Aix::check_signal_handler(int sig) {
3513  char buf[O_BUFLEN];
3514  address jvmHandler = NULL;
3515
3516  struct sigaction act;
3517  if (os_sigaction == NULL) {
3518    // only trust the default sigaction, in case it has been interposed
3519    os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3520    if (os_sigaction == NULL) return;
3521  }
3522
3523  os_sigaction(sig, (struct sigaction*)NULL, &act);
3524
3525  address thisHandler = (act.sa_flags & SA_SIGINFO)
3526    ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3527    : CAST_FROM_FN_PTR(address, act.sa_handler);
3528
3529
3530  switch(sig) {
3531  case SIGSEGV:
3532  case SIGBUS:
3533  case SIGFPE:
3534  case SIGPIPE:
3535  case SIGILL:
3536  case SIGXFSZ:
3537    // Renamed 'signalHandler' to avoid collision with other shared libs.
3538    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3539    break;
3540
3541  case SHUTDOWN1_SIGNAL:
3542  case SHUTDOWN2_SIGNAL:
3543  case SHUTDOWN3_SIGNAL:
3544  case BREAK_SIGNAL:
3545    jvmHandler = (address)user_handler();
3546    break;
3547
3548  case INTERRUPT_SIGNAL:
3549    jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3550    break;
3551
3552  default:
3553    if (sig == SR_signum) {
3554      jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3555    } else {
3556      return;
3557    }
3558    break;
3559  }
3560
3561  if (thisHandler != jvmHandler) {
3562    tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3563    tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3564    tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3565    // No need to check this sig any longer
3566    sigaddset(&check_signal_done, sig);
3567    // When running under a non-interactive shell, SHUTDOWN2_SIGNAL will be reassigned SIG_IGN.
3568    if (sig == SHUTDOWN2_SIGNAL && !isatty(fileno(stdin))) {
3569      tty->print_cr("Running in non-interactive shell, %s handler is replaced by shell",
3570                    exception_name(sig, buf, O_BUFLEN));
3571    }
3572  } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3573    tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3574    tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3575    tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
3576    // No need to check this sig any longer
3577    sigaddset(&check_signal_done, sig);
3578  }
3579
3580  // Dump all the signal handlers.
3581  if (sigismember(&check_signal_done, sig)) {
3582    print_signal_handlers(tty, buf, O_BUFLEN);
3583  }
3584}
3585
3586extern bool signal_name(int signo, char* buf, size_t len);
3587
3588const char* os::exception_name(int exception_code, char* buf, size_t size) {
3589  if (0 < exception_code && exception_code <= SIGRTMAX) {
3590    // signal
3591    if (!signal_name(exception_code, buf, size)) {
3592      jio_snprintf(buf, size, "SIG%d", exception_code);
3593    }
3594    return buf;
3595  } else {
3596    return NULL;
3597  }
3598}
3599
3600// To install functions for atexit system call
3601extern "C" {
3602  static void perfMemory_exit_helper() {
3603    perfMemory_exit();
3604  }
3605}
3606
3607// This is called _before_ most of the global arguments have been parsed.
3608void os::init(void) {
3609  // This is basic; we want to know if that ever changes.
3610  // (The shared memory boundary is supposed to be 256M aligned.)
3611  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3612
3613  // First off, we need to know whether we run on AIX or PASE, and
3614  // the OS level we run on.
3615  os::Aix::initialize_os_info();
3616
3617  // Scan environment (SPEC1170 behaviour, etc)
3618  os::Aix::scan_environment();
3619
3620  // Check which pages are supported by AIX.
3621  os::Aix::query_multipage_support();
3622
3623  // Next, we need to initialize libo4 and libperfstat libraries.
3624  if (os::Aix::on_pase()) {
3625    os::Aix::initialize_libo4();
3626  } else {
3627    os::Aix::initialize_libperfstat();
3628  }
3629
3630  // Reset the perfstat information provided by ODM.
3631  if (os::Aix::on_aix()) {
3632    libperfstat::perfstat_reset();
3633  }
3634
3635  // Now initialize basic system properties. Note that for some of the values we
3636  // need libperfstat etc.
3637  os::Aix::initialize_system_info();
3638
3639  // Initialize large page support.
3640  if (UseLargePages) {
3641    os::large_page_init();
3642    if (!UseLargePages) {
3643      // initialize os::_page_sizes
3644      _page_sizes[0] = Aix::page_size();
3645      _page_sizes[1] = 0;
3646      if (Verbose) {
3647        fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
3648      }
3649    }
3650  } else {
3651    // initialize os::_page_sizes
3652    _page_sizes[0] = Aix::page_size();
3653    _page_sizes[1] = 0;
3654  }
3655
3656  // debug trace
3657  if (Verbose) {
3658    fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
3659    fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
3660    fprintf(stderr, "os::_page_sizes = ( ");
3661    for (int i = 0; _page_sizes[i]; i ++) {
3662      fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
3663    }
3664    fprintf(stderr, ")\n");
3665  }
3666
3667  _initial_pid = getpid();
3668
3669  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3670
3671  init_random(1234567);
3672
3673  ThreadCritical::initialize();
3674
3675  // Main_thread points to the aboriginal thread.
3676  Aix::_main_thread = pthread_self();
3677
3678  initial_time_count = os::elapsed_counter();
3679  pthread_mutex_init(&dl_mutex, NULL);
3680}
3681
3682// this is called _after_ the global arguments have been parsed
3683jint os::init_2(void) {
3684
3685  if (Verbose) {
3686    fprintf(stderr, "processor count: %d\n", os::_processor_count);
3687    fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
3688  }
3689
3690  // initially build up the loaded dll map
3691  LoadedLibraries::reload();
3692
3693  const int page_size = Aix::page_size();
3694  const int map_size = page_size;
3695
3696  address map_address = (address) MAP_FAILED;
3697  const int prot  = PROT_READ;
3698  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3699
3700  // use optimized addresses for the polling page,
3701  // e.g. map it to a special 32-bit address.
3702  if (OptimizePollingPageLocation) {
3703    // architecture-specific list of address wishes:
3704    address address_wishes[] = {
3705      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3706      // PPC64: all address wishes are non-negative 32 bit values where
3707      // the lower 16 bits are all zero. we can load these addresses
3708      // with a single ppc_lis instruction.
3709      (address) 0x30000000, (address) 0x31000000,
3710      (address) 0x32000000, (address) 0x33000000,
3711      (address) 0x40000000, (address) 0x41000000,
3712      (address) 0x42000000, (address) 0x43000000,
3713      (address) 0x50000000, (address) 0x51000000,
3714      (address) 0x52000000, (address) 0x53000000,
3715      (address) 0x60000000, (address) 0x61000000,
3716      (address) 0x62000000, (address) 0x63000000
3717    };
3718    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3719
3720    // iterate over the list of address wishes:
3721    for (int i=0; i<address_wishes_length; i++) {
3722      // try to map with current address wish.
3723      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3724      // fail if the address is already mapped.
3725      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3726                                     map_size, prot,
3727                                     flags | MAP_FIXED,
3728                                     -1, 0);
3729      if (Verbose) {
3730        fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3731                address_wishes[i], map_address + (ssize_t)page_size);
3732      }
3733
3734      if (map_address + (ssize_t)page_size == address_wishes[i]) {
3735        // map succeeded and map_address is at wished address, exit loop.
3736        break;
3737      }
3738
3739      if (map_address != (address) MAP_FAILED) {
3740        // map succeeded, but polling_page is not at wished address, unmap and continue.
3741        ::munmap(map_address, map_size);
3742        map_address = (address) MAP_FAILED;
3743      }
3744      // map failed, continue loop.
3745    }
3746  } // end OptimizePollingPageLocation
3747
3748  if (map_address == (address) MAP_FAILED) {
3749    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3750  }
3751  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3752  os::set_polling_page(map_address);
3753
3754  if (!UseMembar) {
3755    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3756    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3757    os::set_memory_serialize_page(mem_serialize_page);
3758
3759#ifndef PRODUCT
3760    if (Verbose && PrintMiscellaneous)
3761      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3762#endif
3763  }
3764
3765  // initialize suspend/resume support - must do this before signal_sets_init()
3766  if (SR_initialize() != 0) {
3767    perror("SR_initialize failed");
3768    return JNI_ERR;
3769  }
3770
3771  Aix::signal_sets_init();
3772  Aix::install_signal_handlers();
3773
3774  // Check minimum allowable stack size for thread creation and to initialize
3775  // the java system classes, including StackOverflowError - depends on page
3776  // size. Add a page for compiler2 recursion in main thread.
3777  // Add in 2*BytesPerWord times page size to account for VM stack during
3778  // class initialization depending on 32 or 64 bit VM.
3779  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3780            (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
3781                     2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
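
  // Illustrative arithmetic (hypothetical flag values, not the platform
  // defaults): with StackYellowPages=2, StackRedPages=1, StackShadowPages=20,
  // BytesPerWord=8 and a 4K page size, a compiler2 build would require at
  // least (2 + 1 + 20 + 2*8 + 1) * 4K = 160K.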
3782
3783  size_t threadStackSizeInBytes = ThreadStackSize * K;
3784  if (threadStackSizeInBytes != 0 &&
3785      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3786        tty->print_cr("\nThe stack size specified is too small, "
3787                      "Specify at least %dk",
3788                      os::Aix::min_stack_allowed / K);
3789        return JNI_ERR;
3790  }
3791
3792  // Make the stack size a multiple of the page size so that
3793  // the yellow/red zones can be guarded.
3794  // note that this can be 0, if no default stacksize was set
3795  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3796
3797  Aix::libpthread_init();
3798
3799  if (MaxFDLimit) {
3800    // set the number of file descriptors to max. print out error
3801    // if getrlimit/setrlimit fails but continue regardless.
3802    struct rlimit nbr_files;
3803    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3804    if (status != 0) {
3805      if (PrintMiscellaneous && (Verbose || WizardMode))
3806        perror("os::init_2 getrlimit failed");
3807    } else {
3808      nbr_files.rlim_cur = nbr_files.rlim_max;
3809      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3810      if (status != 0) {
3811        if (PrintMiscellaneous && (Verbose || WizardMode))
3812          perror("os::init_2 setrlimit failed");
3813      }
3814    }
3815  }
3816
3817  if (PerfAllowAtExitRegistration) {
3818    // only register atexit functions if PerfAllowAtExitRegistration is set.
3819    // atexit functions can be delayed until process exit time, which
3820    // can be problematic for embedded VM situations. Embedded VMs should
3821    // call DestroyJavaVM() to assure that VM resources are released.
3822
3823    // note: perfMemory_exit_helper atexit function may be removed in
3824    // the future if the appropriate cleanup code can be added to the
3825    // VM_Exit VMOperation's doit method.
3826    if (atexit(perfMemory_exit_helper) != 0) {
3827      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3828    }
3829  }
3830
3831  return JNI_OK;
3832}
3833
3834// this is called at the end of vm_initialization
3835void os::init_3(void) {
3836  return;
3837}
3838
3839// Mark the polling page as unreadable
3840void os::make_polling_page_unreadable(void) {
3841  if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3842    fatal("Could not disable polling page");
3843  }
3844};
3845
3846// Mark the polling page as readable
3847void os::make_polling_page_readable(void) {
3848  // Changed according to os_linux.cpp.
3849  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
3850    fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
3851  }
3852};
3853
3854int os::active_processor_count() {
3855  int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
3856  assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
3857  return online_cpus;
3858}
3859
3860void os::set_native_thread_name(const char *name) {
3861  // Not yet implemented.
3862  return;
3863}
3864
3865bool os::distribute_processes(uint length, uint* distribution) {
3866  // Not yet implemented.
3867  return false;
3868}
3869
3870bool os::bind_to_processor(uint processor_id) {
3871  // Not yet implemented.
3872  return false;
3873}
3874
3875void os::SuspendedThreadTask::internal_do_task() {
3876  if (do_suspend(_thread->osthread())) {
3877    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
3878    do_task(context);
3879    do_resume(_thread->osthread());
3880  }
3881}
3882
3883class PcFetcher : public os::SuspendedThreadTask {
3884public:
3885  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
3886  ExtendedPC result();
3887protected:
3888  void do_task(const os::SuspendedThreadTaskContext& context);
3889private:
3890  ExtendedPC _epc;
3891};
3892
3893ExtendedPC PcFetcher::result() {
3894  guarantee(is_done(), "task is not done yet.");
3895  return _epc;
3896}
3897
3898void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
3899  Thread* thread = context.thread();
3900  OSThread* osthread = thread->osthread();
3901  if (osthread->ucontext() != NULL) {
3902    _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
3903  } else {
3904    // NULL context is unexpected, double-check this is the VMThread.
3905    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
3906  }
3907}
3908
3909// Suspends the target using the signal mechanism and then grabs the PC before
3910// resuming the target. Used by the flat-profiler only
3911ExtendedPC os::get_thread_pc(Thread* thread) {
3912  // Make sure that it is called by the watcher for the VMThread.
3913  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
3914  assert(thread->is_VM_thread(), "Can only be called for VMThread");
3915
3916  PcFetcher fetcher(thread);
3917  fetcher.run();
3918  return fetcher.result();
3919}
3920
3921// Not needed on AIX.
3922// int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
3923// }
3924
3925////////////////////////////////////////////////////////////////////////////////
3926// debug support
3927
3928static address same_page(address x, address y) {
3929  intptr_t page_bits = -os::vm_page_size();
3930  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
3931    return x;
3932  else if (x > y)
3933    return (address)(intptr_t(y) | ~page_bits) + 1;
3934  else
3935    return (address)(intptr_t(y) & page_bits);
3936}
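
// Illustrative examples (assuming a 4K page size, i.e. page_bits == ~0xfff):
//
//   same_page((address)0x2100, (address)0x20f0) returns 0x2100 (same page),
//   same_page((address)0x2100, (address)0x1ff0) returns 0x2000 (y rounded up
//     to the start of x's page),
//   same_page((address)0x1ff0, (address)0x2100) returns 0x2000 (y rounded down
//     to its own page base).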
3937
3938bool os::find(address addr, outputStream* st) {
3939
3940  st->print(PTR_FORMAT ": ", addr);
3941
3942  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
3943  if (lib) {
3944    lib->print(st);
3945    return true;
3946  } else {
3947    lib = LoadedLibraries::find_for_data_address(addr);
3948    if (lib) {
3949      lib->print(st);
3950      return true;
3951    } else {
3952      st->print_cr("(outside any module)");
3953    }
3954  }
3955
3956  return false;
3957}
3958
3959////////////////////////////////////////////////////////////////////////////////
3960// misc
3961
3962// This does not do anything on Aix. This is basically a hook for being
3963// able to use structured exception handling (thread-local exception filters)
3964// on, e.g., Win32.
3965void
3966os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
3967                         JavaCallArguments* args, Thread* thread) {
3968  f(value, method, args, thread);
3969}
3970
3971void os::print_statistics() {
3972}
3973
3974int os::message_box(const char* title, const char* message) {
3975  int i;
3976  fdStream err(defaultStream::error_fd());
3977  for (i = 0; i < 78; i++) err.print_raw("=");
3978  err.cr();
3979  err.print_raw_cr(title);
3980  for (i = 0; i < 78; i++) err.print_raw("-");
3981  err.cr();
3982  err.print_raw_cr(message);
3983  for (i = 0; i < 78; i++) err.print_raw("=");
3984  err.cr();
3985
3986  char buf[16];
3987  // Prevent process from exiting upon "read error" without consuming all CPU
3988  while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
3989
3990  return buf[0] == 'y' || buf[0] == 'Y';
3991}
3992
3993int os::stat(const char *path, struct stat *sbuf) {
3994  char pathbuf[MAX_PATH];
3995  if (strlen(path) > MAX_PATH - 1) {
3996    errno = ENAMETOOLONG;
3997    return -1;
3998  }
3999  os::native_path(strcpy(pathbuf, path));
4000  return ::stat(pathbuf, sbuf);
4001}
4002
4003bool os::check_heap(bool force) {
4004  return true;
4005}
4006
4007// int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
4008//   return ::vsnprintf(buf, count, format, args);
4009// }
4010
4011// Is a (classpath) directory empty?
4012bool os::dir_is_empty(const char* path) {
4013  DIR *dir = NULL;
4014  struct dirent *ptr;
4015
4016  dir = opendir(path);
4017  if (dir == NULL) return true;
4018
4019  /* Scan the directory */
4020  bool result = true;
4021  char buf[sizeof(struct dirent) + MAX_PATH];
4022  while (result && (ptr = ::readdir(dir)) != NULL) {
4023    if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4024      result = false;
4025    }
4026  }
4027  closedir(dir);
4028  return result;
4029}
4030
4031// This code originates from JDK's sysOpen and open64_w
4032// from src/solaris/hpi/src/system_md.c
4033
4034#ifndef O_DELETE
4035#define O_DELETE 0x10000
4036#endif
4037
4038// Open a file. Unlink the file immediately after open returns
4039// if the specified oflag has the O_DELETE flag set.
4040// O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
4041
4042int os::open(const char *path, int oflag, int mode) {
4043
4044  if (strlen(path) > MAX_PATH - 1) {
4045    errno = ENAMETOOLONG;
4046    return -1;
4047  }
4048  int fd;
4049  int o_delete = (oflag & O_DELETE);
4050  oflag = oflag & ~O_DELETE;
4051
4052  fd = ::open64(path, oflag, mode);
4053  if (fd == -1) return -1;
4054
4055  // If the open succeeded, the file might still be a directory.
4056  {
4057    struct stat64 buf64;
4058    int ret = ::fstat64(fd, &buf64);
4059    int st_mode = buf64.st_mode;
4060
4061    if (ret != -1) {
4062      if ((st_mode & S_IFMT) == S_IFDIR) {
4063        errno = EISDIR;
4064        ::close(fd);
4065        return -1;
4066      }
4067    } else {
4068      ::close(fd);
4069      return -1;
4070    }
4071  }
4072
4073  // All file descriptors that are opened in the JVM and not
4074  // specifically destined for a subprocess should have the
4075  // close-on-exec flag set. If we don't set it, then careless 3rd
4076  // party native code might fork and exec without closing all
4077  // appropriate file descriptors (e.g. as we do in closeDescriptors in
4078  // UNIXProcess.c), and this in turn might:
4079  //
4080  // - cause end-of-file to fail to be detected on some file
4081  //   descriptors, resulting in mysterious hangs, or
4082  //
4083  // - might cause an fopen in the subprocess to fail on a system
4084  //   suffering from bug 1085341.
4085  //
4086  // (Yes, the default setting of the close-on-exec flag is a Unix
4087  // design flaw.)
4088  //
4089  // See:
4090  // 1085341: 32-bit stdio routines should support file descriptors >255
4091  // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4092  // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4093#ifdef FD_CLOEXEC
4094  {
4095    int flags = ::fcntl(fd, F_GETFD);
4096    if (flags != -1)
4097      ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4098  }
4099#endif
4100
4101  if (o_delete != 0) {
4102    ::unlink(path);
4103  }
4104  return fd;
4105}
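
// Illustrative use of the O_DELETE extension (the path is hypothetical):
//
//   int fd = os::open("/tmp/scratch.tmp", O_RDWR | O_CREAT | O_DELETE, 0600);
//   // The file is unlinked immediately; it disappears when fd is closed.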
4106
4107
4108// create binary file, rewriting existing file if required
4109int os::create_binary_file(const char* path, bool rewrite_existing) {
4110  int oflags = O_WRONLY | O_CREAT;
4111  if (!rewrite_existing) {
4112    oflags |= O_EXCL;
4113  }
4114  return ::open64(path, oflags, S_IREAD | S_IWRITE);
4115}
4116
4117// return current position of file pointer
4118jlong os::current_file_offset(int fd) {
4119  return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4120}
4121
4122// move file pointer to the specified offset
4123jlong os::seek_to_file_offset(int fd, jlong offset) {
4124  return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4125}
4126
4127// This code originates from JDK's sysAvailable
4128// from src/solaris/hpi/src/native_threads/src/sys_api_td.c
4129
4130int os::available(int fd, jlong *bytes) {
4131  jlong cur, end;
4132  int mode;
4133  struct stat64 buf64;
4134
4135  if (::fstat64(fd, &buf64) >= 0) {
4136    mode = buf64.st_mode;
4137    if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4138      // XXX: is the following call interruptible? If so, this might
4139      // need to go through the INTERRUPT_IO() wrapper as for other
4140      // blocking, interruptible calls in this file.
4141      int n;
4142      if (::ioctl(fd, FIONREAD, &n) >= 0) {
4143        *bytes = n;
4144        return 1;
4145      }
4146    }
4147  }
4148  if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4149    return 0;
4150  } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4151    return 0;
4152  } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4153    return 0;
4154  }
4155  *bytes = end - cur;
4156  return 1;
4157}
4158
4159int os::socket_available(int fd, jint *pbytes) {
4160  // Linux doc says EINTR not returned, unlike Solaris
4161  int ret = ::ioctl(fd, FIONREAD, pbytes);
4162
4163  //%% note that ioctl can return 0 when successful; JVM_SocketAvailable
4164  // is expected to return 0 on failure and 1 on success to the jdk.
4165  return (ret < 0) ? 0 : 1;
4166}
4167
4168// Map a block of memory.
4169char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4170                        char *addr, size_t bytes, bool read_only,
4171                        bool allow_exec) {
4172  Unimplemented();
4173  return NULL;
4174}
4175
4176
4177// Remap a block of memory.
4178char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4179                          char *addr, size_t bytes, bool read_only,
4180                          bool allow_exec) {
4181  // same as map_memory() on this OS
4182  return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4183                        allow_exec);
4184}
4185
4186// Unmap a block of memory.
4187bool os::pd_unmap_memory(char* addr, size_t bytes) {
4188  return munmap(addr, bytes) == 0;
4189}
4190
4191// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4192// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4193// of a thread.
4194//
4195// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4196// the fast estimate available on the platform.
4197
4198jlong os::current_thread_cpu_time() {
4199  // return user + sys since the cost is the same
4200  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4201  assert(n >= 0, "negative CPU time");
4202  return n;
4203}
4204
4205jlong os::thread_cpu_time(Thread* thread) {
4206  // consistent with what current_thread_cpu_time() returns
4207  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4208  assert(n >= 0, "negative CPU time");
4209  return n;
4210}
4211
4212jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4213  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4214  assert(n >= 0, "negative CPU time");
4215  return n;
4216}
4217
4218static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4219  bool error = false;
4220
4221  jlong sys_time = 0;
4222  jlong user_time = 0;
4223
4224  // reimplemented using getthrds64().
4225  //
4226  // goes like this:
4227  // For the thread in question, get the kernel thread id. Then get the
4228  // kernel thread statistics using that id.
4229  //
4230  // Of course, this only works when no pthread scheduling is used,
4231  // i.e. there is a 1:1 relationship to kernel threads.
4232  // On AIX, see the AIXTHREAD_SCOPE variable.
4233
4234  pthread_t pthtid = thread->osthread()->pthread_id();
4235
4236  // retrieve kernel thread id for the pthread:
4237  tid64_t tid = 0;
4238  struct __pthrdsinfo pinfo;
4239  // I just love those otherworldly IBM APIs which force me to hand down
4240  // dummy buffers for stuff I don't care for...
4241  char dummy[1];
4242  int dummy_size = sizeof(dummy);
4243  if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4244                          dummy, &dummy_size) == 0) {
4245    tid = pinfo.__pi_tid;
4246  } else {
4247    tty->print_cr("pthread_getthrds_np failed.");
4248    error = true;
4249  }
4250
4251  // retrieve kernel timing info for that kernel thread
4252  if (!error) {
4253    struct thrdentry64 thrdentry;
4254    if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4255      sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4256      user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4257    } else {
4258      tty->print_cr("pthread_getthrds_np failed.");
4259      error = true;
4260    }
4261  }
4262
4263  if (p_sys_time) {
4264    *p_sys_time = sys_time;
4265  }
4266
4267  if (p_user_time) {
4268    *p_user_time = user_time;
4269  }
4270
4271  if (error) {
4272    return false;
4273  }
4274
4275  return true;
4276}
4277
4278jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4279  jlong sys_time;
4280  jlong user_time;
4281
4282  if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4283    return -1;
4284  }
4285
4286  return user_sys_cpu_time ? sys_time + user_time : user_time;
4287}
4288
4289void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4290  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4291  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4292  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4293  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4294}
4295
4296void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4297  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
4298  info_ptr->may_skip_backward = false;     // elapsed time not wall time
4299  info_ptr->may_skip_forward = false;      // elapsed time not wall time
4300  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
4301}
4302
4303bool os::is_thread_cpu_time_supported() {
4304  return true;
4305}
4306
4307// System loadavg support. Returns -1 if load average cannot be obtained.
4308// For now just return the system wide load average (no processor sets).
4309int os::loadavg(double values[], int nelem) {
4310
4311  // Implemented using libperfstat on AIX.
4312
4313  guarantee(nelem >= 0 && nelem <= 3, "argument error");
4314  guarantee(values, "argument error");
4315
4316  if (os::Aix::on_pase()) {
4317    Unimplemented();
4318    return -1;
4319  } else {
4320    // AIX: use libperfstat
4321    //
4322    // See also:
4323    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4324    // /usr/include/libperfstat.h:
4325
4326    // Use the already AIX version independent get_cpuinfo.
4327    os::Aix::cpuinfo_t ci;
4328    if (os::Aix::get_cpuinfo(&ci)) {
4329      for (int i = 0; i < nelem; i++) {
4330        values[i] = ci.loadavg[i];
4331      }
4332    } else {
4333      return -1;
4334    }
4335    return nelem;
4336  }
4337}
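
// Illustrative use: sample the 1, 5 and 15 minute load averages.
//
//   double avg[3];
//   if (os::loadavg(avg, 3) == 3) {
//     // avg[0], avg[1], avg[2] now hold the 1/5/15 minute averages.
//   }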
4338
4339void os::pause() {
4340  char filename[MAX_PATH];
4341  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4342    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4343  } else {
4344    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4345  }
4346
4347  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4348  if (fd != -1) {
4349    struct stat buf;
4350    ::close(fd);
4351    while (::stat(filename, &buf) == 0) {
4352      (void)::poll(NULL, 0, 100);
4353    }
4354  } else {
4355    jio_fprintf(stderr,
4356      "Could not open pause file '%s', continuing immediately.\n", filename);
4357  }
4358}
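
// Illustrative use (assuming -XX:+PauseAtStartup triggers this hook): the VM
// blocks in the stat() loop above until the pause file disappears, so from a
// second shell one would resume it with, e.g.:
//
//   rm vm.paused.12345    # hypothetical pid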
4359
4360bool os::Aix::is_primordial_thread() {
4361  return pthread_self() == (pthread_t)1;
4366}
4367
4368// OS recognition (PASE/AIX, OS level). Call this before calling any
4369// of the Aix::on_pase(), Aix::os_version() statics.
4370void os::Aix::initialize_os_info() {
4371
4372  assert(_on_pase == -1 && _os_version == -1, "already called.");
4373
4374  struct utsname uts;
4375  memset(&uts, 0, sizeof(uts));
4376  strcpy(uts.sysname, "?");
4377  if (::uname(&uts) == -1) {
4378    fprintf(stderr, "uname failed (%d)\n", errno);
4379    guarantee(0, "Could not determine whether we run on AIX or PASE");
4380  } else {
4381    if (Verbose) {
4382      fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4383              "node \"%s\" machine \"%s\"\n",
4384              uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4385    }
4386    const int major = atoi(uts.version);
4387    assert(major > 0, "invalid OS version");
4388    const int minor = atoi(uts.release);
4389    assert(minor > 0, "invalid OS release");
4390    _os_version = (major << 8) | minor;
4391    if (strcmp(uts.sysname, "OS400") == 0) {
4392      Unimplemented();
4393    } else if (strcmp(uts.sysname, "AIX") == 0) {
4394      // We run on AIX. We do not support versions older than AIX 5.3.
4395      _on_pase = 0;
4396      if (_os_version < 0x0503) {
4397        fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
4398        assert(false, "AIX release too old.");
4399      } else {
4400        if (Verbose) {
4401          fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
4402        }
4403      }
4404    } else {
4405      assert(false, "unknown OS");
4406    }
4407  }
4408
4409  guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4410
4411} // end: os::Aix::initialize_os_info()
4412
4413// Scan environment for important settings which might affect the VM.
4414// Trace out settings. Warn about invalid settings and/or correct them.
4415//
4416// Must run after os::Aix::initialize_os_info().
4417void os::Aix::scan_environment() {
4418
4419  char* p;
4420  int rc;
4421
4422  // Warn explicitly if EXTSHM=ON is used. That switch changes how
4423  // System V shared memory behaves. One effect is that the page size of
4424  // shared memory cannot be changed dynamically, effectively preventing
4425  // large pages from working.
4426  // This switch was needed on AIX 32bit, but on AIX 64bit the general
4427  // recommendation is (in OSS notes) to switch it off.
4428  p = ::getenv("EXTSHM");
4429  if (Verbose) {
4430    fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4431  }
4432  if (p && strcmp(p, "ON") == 0) {
4433    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4434    _extshm = 1;
4435  } else {
4436    _extshm = 0;
4437  }
4438
4439  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4440  // Not tested, not supported.
4441  //
4442  // Note that it might be worth the trouble to test and to require it, if only to
4443  // get useful return codes for mprotect.
4444  //
4445  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4446  // exec() ? before loading the libjvm ? ....)
4447  p = ::getenv("XPG_SUS_ENV");
4448  if (Verbose) {
4449    fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
4450  }
4451  if (p && strcmp(p, "ON") == 0) {
4452    _xpg_sus_mode = 1;
4453    fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
4454    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4455    // clobber address ranges. If we ever want to support that, we have to do some
4456    // testing first.
4457    guarantee(false, "XPG_SUS_ENV=ON not supported");
4458  } else {
4459    _xpg_sus_mode = 0;
4460  }
4461
4462  // Switch off AIX internal (pthread) guard pages. This has
4463  // immediate effect for any pthread_create calls which follow.
4464  p = ::getenv("AIXTHREAD_GUARDPAGES");
4465  if (Verbose) {
4466    fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
4467    fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
4468  }
4469  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4470  guarantee(rc == 0, "");
4471
4472} // end: os::Aix::scan_environment()
4473
4474// PASE: initialize the libo4 library (AS400 PASE porting library).
4475void os::Aix::initialize_libo4() {
4476  Unimplemented();
4477}
4478
4479// AIX: initialize the libperfstat library (we load this dynamically
4480// because it is only available on AIX).
4481void os::Aix::initialize_libperfstat() {
4482
4483  assert(os::Aix::on_aix(), "AIX only");
4484
4485  if (!libperfstat::init()) {
4486    fprintf(stderr, "libperfstat initialization failed.\n");
4487    assert(false, "libperfstat initialization failed");
4488  } else {
4489    if (Verbose) {
4490      fprintf(stderr, "libperfstat initialized.\n");
4491    }
4492  }
4493} // end: os::Aix::initialize_libperfstat
4494
4495/////////////////////////////////////////////////////////////////////////////
4496// thread stack
4497
4498// function to query the current stack size using pthread_getthrds_np
4499//
4500// ! do not change anything here unless you know what you are doing !
4501static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4502
4503  // This only works when invoked on a pthread. As we agreed not to use
4504  // primordial threads anyway, I assert that here.
4505  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4506
4507  // information about this api can be found (a) in the pthread.h header and
4508  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4509  //
4510  // The use of this API to find out the current stack is kind of undefined.
4511  // But after a lot of tries and asking IBM about it, I concluded that it is safe
4512  // enough for cases where I let the pthread library create its stacks. For cases
4513  // where I create my own stack and pass it to pthread_create, it seems not to
4514  // work (the returned stack size in that case is 0).
4515
4516  pthread_t tid = pthread_self();
4517  struct __pthrdsinfo pinfo;
4518  char dummy[1]; // we only need this to satisfy the api and to not get E
4519  int dummy_size = sizeof(dummy);
4520
4521  memset(&pinfo, 0, sizeof(pinfo));
4522
4523  const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4524                                      sizeof(pinfo), dummy, &dummy_size);
4525
4526  if (rc != 0) {
4527    fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
4528    guarantee(0, "pthread_getthrds_np failed");
4529  }
4530
4531  guarantee(pinfo.__pi_stackend, "returned stack base invalid");
4532
4533  // the following can happen when invoking pthread_getthrds_np on a pthread running on a user provided stack
4534  // (when handing down a stack to pthread_create, see pthread_attr_setstackaddr).
4535  // Not sure what to do here - I feel inclined to forbid this use case completely.
4536  guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
4537
4538  // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
4539  if (p_stack_base) {
4540    (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
4541  }
4542
4543  if (p_stack_size) {
4544    (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
4545  }
4546
4547#ifndef PRODUCT
4548  if (Verbose) {
4549    fprintf(stderr,
4550            "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
4551            ", real stack_size=" INTPTR_FORMAT
4552            ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
4553            (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
4554            (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
4555            pinfo.__pi_stacksize - os::Aix::stack_page_size());
4556  }
4557#endif
4558
4559} // end query_stack_dimensions
4560
4561// get the current stack base from the OS (actually, the pthread library)
4562address os::current_stack_base() {
4563  address p;
4564  query_stack_dimensions(&p, 0);
4565  return p;
4566}
4567
4568// get the current stack size from the OS (actually, the pthread library)
4569size_t os::current_stack_size() {
4570  size_t s;
4571  query_stack_dimensions(0, &s);
4572  return s;
4573}
4574
4575// Refer to the comments in os_solaris.cpp park-unpark.
4576//
4577// Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4578// hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4579// For specifics regarding the bug see GLIBC BUGID 261237 :
4580//    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4581// Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4582// will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4583// is used. (The simple C test-case provided in the GLIBC bug report manifests the
4584// hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4585// and monitorenter when we're using 1-0 locking. All those operations may result in
4586// calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4587// of libpthread avoids the problem, but isn't practical.
4588//
4589// Possible remedies:
4590//
4591// 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4592//      This is palliative and probabilistic, however. If the thread is preempted
4593//      between the call to compute_abstime() and pthread_cond_timedwait(), more
4594//      than the minimum period may have passed, and the abstime may be stale (in the
4595//      past), resulting in a hang. Using this technique reduces the odds of a hang
4596//      but the JVM is still vulnerable, particularly on heavily loaded systems.
4597//
4598// 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4599//      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4600//      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4601//      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4602//      thread.
4603//
4604// 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
4605//      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4606//      a timeout request to the chron thread and then blocking via pthread_cond_wait().
4607//      This also works well. In fact it avoids kernel-level scalability impediments
4608//      on certain platforms that don't handle lots of active pthread_cond_timedwait()
4609//      timers in a graceful fashion.
4610//
4611// 4.   When the abstime value is in the past it appears that control returns
4612//      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4613//      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4614//      can avoid the problem by reinitializing the condvar -- by cond_destroy()
4615//      followed by cond_init() -- after all calls to pthread_cond_timedwait().
4616//      It may be possible to avoid reinitialization by checking the return
4617//      value from pthread_cond_timedwait(). In addition to reinitializing the
4618//      condvar we must establish the invariant that cond_signal() is only called
4619//      within critical sections protected by the adjunct mutex. This prevents
4620//      cond_signal() from "seeing" a condvar that's in the midst of being
4621//      reinitialized or that is corrupt. Sadly, this invariant obviates the
4622//      desirable signal-after-unlock optimization that avoids futile context switching.
4623//
4624//      I'm also concerned that some versions of NPTL might allocate an auxiliary
4625//      structure when a condvar is used or initialized. cond_destroy() would
4626//      release the helper structure. Our reinitialize-after-timedwait fix
4627//      would put excessive stress on malloc/free and locks protecting the c-heap.
4628//
4629// We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
4630// It may be possible to refine (4) by checking the kernel and NPTL versions
4631// and only enabling the work-around for vulnerable environments.
4632
4633// utility to compute the abstime argument to timedwait:
4634// millis is the relative timeout time
4635// abstime will be the absolute timeout time
4636// TODO: replace compute_abstime() with unpackTime()
4637
4638static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4639  if (millis < 0) millis = 0;
4640  struct timeval now;
4641  int status = gettimeofday(&now, NULL);
4642  assert(status == 0, "gettimeofday");
4643  jlong seconds = millis / 1000;
4644  millis %= 1000;
4645  if (seconds > 50000000) { // see man cond_timedwait(3T)
4646    seconds = 50000000;
4647  }
4648  abstime->tv_sec = now.tv_sec  + seconds;
4649  long       usec = now.tv_usec + millis * 1000;
4650  if (usec >= 1000000) {
4651    abstime->tv_sec += 1;
4652    usec -= 1000000;
4653  }
4654  abstime->tv_nsec = usec * 1000;
4655  return abstime;
4656}
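
// Illustrative example: with now = { tv_sec = 100, tv_usec = 600000 } and
// millis = 1500: seconds = 1, remaining millis = 500, so usec becomes
// 600000 + 500 * 1000 = 1100000 >= 1000000; after the carry,
// abstime = { tv_sec = 102, tv_nsec = 100000 * 1000 = 100000000 }.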
4657
4658
4659// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4660// Conceptually TryPark() should be equivalent to park(0).
4661
4662int os::PlatformEvent::TryPark() {
4663  for (;;) {
4664    const int v = _Event;
4665    guarantee ((v == 0) || (v == 1), "invariant");
4666    if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4667  }
4668}
4669
4670void os::PlatformEvent::park() {       // AKA "down()"
4671  // Invariant: Only the thread associated with the Event/PlatformEvent
4672  // may call park().
4673  // TODO: assert that _Assoc != NULL or _Assoc == Self
4674  int v;
4675  for (;;) {
4676    v = _Event;
4677    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4678  }
4679  guarantee (v >= 0, "invariant");
4680  if (v == 0) {
4681    // Do this the hard way by blocking ...
4682    int status = pthread_mutex_lock(_mutex);
4683    assert_status(status == 0, status, "mutex_lock");
4684    guarantee (_nParked == 0, "invariant");
4685    ++ _nParked;
4686    while (_Event < 0) {
4687      status = pthread_cond_wait(_cond, _mutex);
4688      assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
4689    }
4690    -- _nParked;
4691
4692    // In theory we could move the ST of 0 into _Event past the unlock(),
4693    // but then we'd need a MEMBAR after the ST.
4694    _Event = 0;
4695    status = pthread_mutex_unlock(_mutex);
4696    assert_status(status == 0, status, "mutex_unlock");
4697  }
4698  guarantee (_Event >= 0, "invariant");
4699}
4700
4701int os::PlatformEvent::park(jlong millis) {
4702  guarantee (_nParked == 0, "invariant");
4703
4704  int v;
4705  for (;;) {
4706    v = _Event;
4707    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4708  }
4709  guarantee (v >= 0, "invariant");
4710  if (v != 0) return OS_OK;
4711
4712  // We do this the hard way, by blocking the thread.
4713  // Consider enforcing a minimum timeout value.
4714  struct timespec abst;
4715  compute_abstime(&abst, millis);
4716
4717  int ret = OS_TIMEOUT;
4718  int status = pthread_mutex_lock(_mutex);
4719  assert_status(status == 0, status, "mutex_lock");
4720  guarantee (_nParked == 0, "invariant");
4721  ++_nParked;
4722
4723  // Object.wait(timo) will return because of
4724  // (a) notification
4725  // (b) timeout
4726  // (c) thread.interrupt
4727  //
4728  // Thread.interrupt and object.notify{All} both call Event::set.
4729  // That is, we treat thread.interrupt as a special case of notification.
4730  // Spurious OS wakeups are swallowed when FilterSpuriousWakeups is true.
4731  // We assume all ETIME returns are valid.
4732  //
4733  // TODO: properly differentiate simultaneous notify+interrupt.
4734  // In that case, we should propagate the notify to another waiter.
4735
4736  while (_Event < 0) {
4737    status = pthread_cond_timedwait(_cond, _mutex, &abst);
4738    assert_status(status == 0 || status == ETIMEDOUT,
4739          status, "cond_timedwait");
4740    if (!FilterSpuriousWakeups) break;         // previous semantics
4741    if (status == ETIMEDOUT) break;
4742    // We consume and ignore EINTR and spurious wakeups.
4743  }
4744  --_nParked;
4745  if (_Event >= 0) {
4746     ret = OS_OK;
4747  }
4748  _Event = 0;
4749  status = pthread_mutex_unlock(_mutex);
4750  assert_status(status == 0, status, "mutex_unlock");
4751  assert (_nParked == 0, "invariant");
4752  return ret;
4753}
4754
4755void os::PlatformEvent::unpark() {
4756  int v, AnyWaiters;
4757  for (;;) {
4758    v = _Event;
4759    if (v > 0) {
4760      // The LD of _Event could have reordered or be satisfied
4761      // by a read-aside from this processor's write buffer.
4762      // To avoid problems execute a barrier and then
4763      // ratify the value.
4764      OrderAccess::fence();
4765      if (_Event == v) return;
4766      continue;
4767    }
4768    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4769  }
4770  if (v < 0) {
4771    // Wait for the thread associated with the event to vacate
4772    int status = pthread_mutex_lock(_mutex);
4773    assert_status(status == 0, status, "mutex_lock");
4774    AnyWaiters = _nParked;
4775
4776    if (AnyWaiters != 0) {
4777      // Note that this port signals while still holding the mutex; the
4778      // trailing note below explains why signalling *after* dropping the
4779      // lock would also be safe for these "immortal" events.
4780      status = pthread_cond_signal(_cond);
4781      assert_status(status == 0, status, "cond_signal");
4782    }
4783    status = pthread_mutex_unlock(_mutex);
4784    assert_status(status == 0, status, "mutex_unlock");
4785  }
4786
4787  // Signalling _after_ dropping the lock would also be safe for "immortal"
4788  // Events and would avoid a common class of futile wakeups. In rare
4789  // circumstances it can cause a thread to return prematurely from
4790  // cond_{timed}wait(), but the spurious wakeup is benign and the victim
4791  // will simply re-test the condition and re-park itself.
4792}
4793
4794
4795// JSR166
4796// -------------------------------------------------------
4797
4798//
4799// The solaris and linux implementations of park/unpark are fairly
4800// conservative for now, but can be improved. They currently use a
4801// mutex/condvar pair, plus a count.
4802// Park decrements count if > 0, else does a condvar wait. Unpark
4803// sets count to 1 and signals condvar. Only one thread ever waits
4804// on the condvar. Contention seen when trying to park implies that someone
4805// is unparking you, so don't wait. And spurious returns are fine, so there
4806// is no need to track notifications.
4807//
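//
// A sketch of that protocol (editor's illustration, not the Parker code
// below; assumes a pthread mutex/condvar pair and an int _counter):
//
//   void park() {
//     pthread_mutex_lock(&m);
//     if (_counter == 0) pthread_cond_wait(&c, &m);  // spurious returns are fine
//     _counter = 0;                                  // consume the permit, if any
//     pthread_mutex_unlock(&m);
//   }
//   void unpark() {
//     pthread_mutex_lock(&m);
//     _counter = 1;                                  // permits do not accumulate
//     pthread_cond_signal(&c);
//     pthread_mutex_unlock(&m);
//   }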
4808
4809#define MAX_SECS 100000000
4810//
4811// This code is common to linux and solaris and will be moved to a
4812// common place in dolphin.
4813//
4814// The passed in time value is either a relative time in nanoseconds
4815// or an absolute time in milliseconds. Either way it has to be unpacked
4816// into suitable seconds and nanoseconds components and stored in the
4817// given timespec structure.
4818// Since the given time is a 64-bit value and the time_t used in the timespec
4819// is only a signed 32-bit value (except on 64-bit Linux), we have to watch for
4820// overflow if times far in the future are given. Further, on Solaris versions
4821// prior to 10 there is a restriction (see cond_timedwait) that the specified
4822// number of seconds, in abstime, is less than current_time + 100,000,000.
4823// As it will be 28 years before "now + 100000000" will overflow we can
4824// ignore overflow and just impose a hard-limit on seconds using the value
4825// of "now + 100,000,000". This places a limit on the timeout of about 3.17
4826// years from "now".
4827//
4828
4829static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4830  assert (time > 0, "unpackTime");
4831
4832  struct timeval now;
4833  int status = gettimeofday(&now, NULL);
4834  assert(status == 0, "gettimeofday");
4835
4836  time_t max_secs = now.tv_sec + MAX_SECS;
4837
4838  if (isAbsolute) {
4839    jlong secs = time / 1000;
4840    if (secs > max_secs) {
4841      absTime->tv_sec = max_secs;
4842    }
4843    else {
4844      absTime->tv_sec = secs;
4845    }
4846    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4847  }
4848  else {
4849    jlong secs = time / NANOSECS_PER_SEC;
4850    if (secs >= MAX_SECS) {
4851      absTime->tv_sec = max_secs;
4852      absTime->tv_nsec = 0;
4853    }
4854    else {
4855      absTime->tv_sec = now.tv_sec + secs;
4856      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
4857      if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
4858        absTime->tv_nsec -= NANOSECS_PER_SEC;
4859        ++absTime->tv_sec; // note: this must be <= max_secs
4860      }
4861    }
4862  }
4863  assert(absTime->tv_sec >= 0, "tv_sec < 0");
4864  assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
4865  assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
4866  assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
4867}
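
// Illustrative use of unpackTime() (editor's sketch; the timeout values are
// hypothetical):
//
//   struct timespec deadline;
//   // relative: 250 ms from now, given in nanoseconds
//   unpackTime(&deadline, false, 250 * NANOSECS_PER_MILLISEC);
//   // absolute: a point in time given in milliseconds since the epoch
//   unpackTime(&deadline, true, os::javaTimeMillis() + 250);
//
// In both cases tv_sec is clamped to now + MAX_SECS.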
4868
4869void Parker::park(bool isAbsolute, jlong time) {
4870  // Optional fast-path check:
4871  // Return immediately if a permit is available.
4872  if (_counter > 0) {
4873      _counter = 0;
4874      OrderAccess::fence();
4875      return;
4876  }
4877
4878  Thread* thread = Thread::current();
4879  assert(thread->is_Java_thread(), "Must be JavaThread");
4880  JavaThread *jt = (JavaThread *)thread;
4881
4882  // Optional optimization -- avoid state transitions if there's an interrupt pending.
4883  // Check interrupt before trying to wait
4884  if (Thread::is_interrupted(thread, false)) {
4885    return;
4886  }
4887
4888  // Next, demultiplex/decode time arguments
4889  timespec absTime;
4890  if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
4891    return;
4892  }
4893  if (time > 0) {
4894    unpackTime(&absTime, isAbsolute, time);
4895  }
4896
4897
4898  // Enter safepoint region
4899  // Beware of deadlocks such as 6317397.
4900  // The per-thread Parker:: mutex is a classic leaf-lock.
4901  // In particular a thread must never block on the Threads_lock while
4902  // holding the Parker:: mutex. If safepoints are pending both the
4903  // the ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
4904  ThreadBlockInVM tbivm(jt);
4905
4906  // Don't wait if we cannot get the lock, since interference arises from
4907  // unblocking. Also, check for a pending interrupt before trying to wait.
4908  if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
4909    return;
4910  }
4911
4912  int status;
4913  if (_counter > 0) { // no wait needed
4914    _counter = 0;
4915    status = pthread_mutex_unlock(_mutex);
4916    assert (status == 0, "invariant");
4917    OrderAccess::fence();
4918    return;
4919  }
4920
4921#ifdef ASSERT
4922  // Don't catch signals while blocked; let the running threads have the signals.
4923  // (This allows a debugger to break into the running thread.)
4924  sigset_t oldsigs;
4925  sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
4926  pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
4927#endif
4928
4929  OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
4930  jt->set_suspend_equivalent();
4931  // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
4932
4933  if (time == 0) {
4934    status = pthread_cond_wait (_cond, _mutex);
4935  } else {
4936    status = pthread_cond_timedwait (_cond, _mutex, &absTime);
4937    if (status != 0 && WorkAroundNPTLTimedWaitHang) {
4938      pthread_cond_destroy (_cond);
4939      pthread_cond_init    (_cond, NULL);
4940    }
4941  }
4942  assert_status(status == 0 || status == EINTR ||
4943                status == ETIME || status == ETIMEDOUT,
4944                status, "cond_timedwait");
4945
4946#ifdef ASSERT
4947  pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
4948#endif
4949
4950  _counter = 0;
4951  status = pthread_mutex_unlock(_mutex);
4952  assert_status(status == 0, status, "invariant");
4953  // If externally suspended while waiting, re-suspend
4954  if (jt->handle_special_suspend_equivalent_condition()) {
4955    jt->java_suspend_self();
4956  }
4957
4958  OrderAccess::fence();
4959}
4960
4961void Parker::unpark() {
4962  int s, status;
4963  status = pthread_mutex_lock(_mutex);
4964  assert (status == 0, "invariant");
4965  s = _counter;
4966  _counter = 1;
4967  if (s < 1) {
4968    if (WorkAroundNPTLTimedWaitHang) {
4969      status = pthread_cond_signal (_cond);
4970      assert (status == 0, "invariant");
4971      status = pthread_mutex_unlock(_mutex);
4972      assert (status == 0, "invariant");
4973    } else {
4974      status = pthread_mutex_unlock(_mutex);
4975      assert (status == 0, "invariant");
4976      status = pthread_cond_signal (_cond);
4977      assert (status == 0, "invariant");
4978    }
4979  } else {
4980    status = pthread_mutex_unlock(_mutex);
4981    assert (status == 0, "invariant");
4982  }
4983}
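
// Permit semantics, following from the JSR166 comment above (editor's note;
// the call sequence is hypothetical):
//
//   p->unpark();           // _counter := 1; permits do not accumulate
//   p->unpark();           // still 1
//   p->park(false, 0);     // consumes the permit and returns immediately
//   p->park(false, 0);     // no permit: blocks until the next unpark()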
4984
4985
4986extern char** environ;
4987
4988// Run the specified command in a separate process. Return its exit value,
4989// or -1 on failure (e.g. can't fork a new process).
4990// Unlike system(), this function can be called from signal handler. It
4991// doesn't block SIGINT et al.
4992int os::fork_and_exec(char* cmd) {
4993  char * argv[4] = {"sh", "-c", cmd, NULL};
4994
4995  pid_t pid = fork();
4996
4997  if (pid < 0) {
4998    // fork failed
4999    return -1;
5000
5001  } else if (pid == 0) {
5002    // child process
5003
5004    // try to be consistent with system(), which uses "/usr/bin/sh" on AIX
5005    execve("/usr/bin/sh", argv, environ);
5006
5007    // execve failed
5008    _exit(-1);
5009
5010  } else  {
5011    // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5012    // care about the actual exit code, for now.
5013
5014    int status;
5015
5016    // Wait for the child process to exit.  This returns immediately if
5017    // the child has already exited.
5018    while (waitpid(pid, &status, 0) < 0) {
5019        switch (errno) {
5020        case ECHILD: return 0;
5021        case EINTR: break;
5022        default: return -1;
5023        }
5024    }
5025
5026    if (WIFEXITED(status)) {
5027       // The child exited normally; get its exit code.
5028       return WEXITSTATUS(status);
5029    } else if (WIFSIGNALED(status)) {
5030       // The child exited because of a signal
5031       // The best value to return is 0x80 + signal number,
5032       // because that is what all Unix shells do, and because
5033       // it allows callers to distinguish between process exit and
5034       // process death by signal.
5035       return 0x80 + WTERMSIG(status);
5036    } else {
5037       // Unknown exit code; pass it through
5038       return status;
5039    }
5040  }
5041  // Remove warning.
5042  return -1;
5043}
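
// Illustrative use of fork_and_exec() (editor's sketch; the command is
// hypothetical):
//
//   int rc = os::fork_and_exec((char*) "uname -a");
//   // rc: the child's exit code, 0x80 + signal number if it was killed,
//   //     or -1 if the fork or exec failed.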
5044
5045// is_headless_jre()
5046//
5047// Test for the existence of xawt/libmawt.so or libawt_xawt.so
5048// in order to report if we are running in a headless jre.
5049//
5050// Since JDK8, xawt/libmawt.so has been moved into the same directory
5051// as libawt.so and renamed libawt_xawt.so.
5052bool os::is_headless_jre() {
5053  struct stat statbuf;
5054  char buf[MAXPATHLEN];
5055  char libmawtpath[MAXPATHLEN];
5056  const char *xawtstr  = "/xawt/libmawt.so";
5057  const char *new_xawtstr = "/libawt_xawt.so";
5058
5059  char *p;
5060
5061  // Get path to libjvm.so
5062  os::jvm_path(buf, sizeof(buf));
5063
5064  // Get rid of libjvm.so
5065  p = strrchr(buf, '/');
5066  if (p == NULL) return false;
5067  else *p = '\0';
5068
5069  // Get rid of client or server
5070  p = strrchr(buf, '/');
5071  if (p == NULL) return false;
5072  else *p = '\0';
5073
5074  // check xawt/libmawt.so
5075  // Use snprintf to guard against overflowing libmawtpath.
5076  snprintf(libmawtpath, sizeof(libmawtpath), "%s%s", buf, xawtstr);
5077  if (::stat(libmawtpath, &statbuf) == 0) return false;
5078
5079  // check libawt_xawt.so
5080  snprintf(libmawtpath, sizeof(libmawtpath), "%s%s", buf, new_xawtstr);
5082  if (::stat(libmawtpath, &statbuf) == 0) return false;
5083
5084  return true;
5085}
5086
5087// Get the default path to the core file
5088// Returns the length of the string
5089int os::get_core_path(char* buffer, size_t bufferSize) {
5090  const char* p = get_current_directory(buffer, bufferSize);
5091
5092  if (p == NULL) {
5093    assert(p != NULL, "failed to get current directory");
5094    return 0;
5095  }
5096
5097  return strlen(buffer);
5098}
5099
5100#ifndef PRODUCT
5101void TestReserveMemorySpecial_test() {
5102  // No tests available for this platform
5103}
5104#endif
5105